LLVM 22.0.0git
AArch64InstrInfo.h
Go to the documentation of this file.
1//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
14#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
15
16#include "AArch64.h"
17#include "AArch64RegisterInfo.h"
20#include <optional>
21
22#define GET_INSTRINFO_HEADER
23#include "AArch64GenInstrInfo.inc"
24
25namespace llvm {
26
27class AArch64Subtarget;
28
33
34#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
35
36// AArch64 MachineCombiner patterns
38 // These are patterns used to reduce the length of dependence chain.
41
42 // These are multiply-add patterns matched by the AArch64 machine combiner.
55 // NEON integers vectors
68
81
90
99
100 // Floating Point
162
173
175
181 const AArch64RegisterInfo RI;
182 const AArch64Subtarget &Subtarget;
183
184public:
185 explicit AArch64InstrInfo(const AArch64Subtarget &STI);
186
187 /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
188 /// such, whenever a client has an instance of instruction info, it should
189 /// always be able to get register info as well (through this method).
190 const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
191
192 unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
193
194 bool isAsCheapAsAMove(const MachineInstr &MI) const override;
195
196 bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
197 Register &DstReg, unsigned &SubIdx) const override;
198
199 bool
201 const MachineInstr &MIb) const override;
202
204 int &FrameIndex) const override;
206 int &FrameIndex) const override;
207
208 /// Does this instruction set its full destination register to zero?
209 static bool isGPRZero(const MachineInstr &MI);
210
211 /// Does this instruction rename a GPR without modifying bits?
212 static bool isGPRCopy(const MachineInstr &MI);
213
214 /// Does this instruction rename an FPR without modifying bits?
215 static bool isFPRCopy(const MachineInstr &MI);
216
217 /// Return true if pairing the given load or store is hinted to be
218 /// unprofitable.
219 static bool isLdStPairSuppressed(const MachineInstr &MI);
220
221 /// Return true if the given load or store is a strided memory access.
222 static bool isStridedAccess(const MachineInstr &MI);
223
224 /// Return true if it has an unscaled load/store offset.
225 static bool hasUnscaledLdStOffset(unsigned Opc);
227 return hasUnscaledLdStOffset(MI.getOpcode());
228 }
229
230 /// Returns the unscaled load/store for the scaled load/store opcode,
231 /// if there is a corresponding unscaled variant available.
232 static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
233
234 /// Scaling factor for (scaled or unscaled) load or store.
235 static int getMemScale(unsigned Opc);
236 static int getMemScale(const MachineInstr &MI) {
237 return getMemScale(MI.getOpcode());
238 }
239
240 /// Returns whether the instruction is a pre-indexed load.
241 static bool isPreLd(const MachineInstr &MI);
242
243 /// Returns whether the instruction is a pre-indexed store.
244 static bool isPreSt(const MachineInstr &MI);
245
246 /// Returns whether the instruction is a pre-indexed load/store.
247 static bool isPreLdSt(const MachineInstr &MI);
248
249 /// Returns whether the instruction is a paired load/store.
250 static bool isPairedLdSt(const MachineInstr &MI);
251
252 /// Returns the base register operator of a load/store.
253 static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);
254
255 /// Returns the immediate offset operator of a load/store.
256 static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);
257
258 /// Returns whether the physical register is FP or NEON.
259 static bool isFpOrNEON(Register Reg);
260
261 /// Returns the shift amount operator of a load/store.
262 static const MachineOperand &getLdStAmountOp(const MachineInstr &MI);
263
264 /// Returns whether the instruction is FP or NEON.
265 static bool isFpOrNEON(const MachineInstr &MI);
266
267 /// Returns whether the instruction is in H form (16 bit operands)
268 static bool isHForm(const MachineInstr &MI);
269
270 /// Returns whether the instruction is in Q form (128 bit operands)
271 static bool isQForm(const MachineInstr &MI);
272
273 /// Returns whether the instruction can be compatible with non-zero BTYPE.
274 static bool hasBTISemantics(const MachineInstr &MI);
275
276 /// Returns the index for the immediate for a given instruction.
277 static unsigned getLoadStoreImmIdx(unsigned Opc);
278
279 /// Return true if pairing the given load or store may be paired with another.
280 static bool isPairableLdStInst(const MachineInstr &MI);
281
282 /// Returns true if MI is one of the TCRETURN* instructions.
283 static bool isTailCallReturnInst(const MachineInstr &MI);
284
285 /// Return the opcode that set flags when possible. The caller is
286 /// responsible for ensuring the opc has a flag setting equivalent.
287 static unsigned convertToFlagSettingOpc(unsigned Opc);
288
289 /// Return true if this is a load/store that can be potentially paired/merged.
290 bool isCandidateToMergeOrPair(const MachineInstr &MI) const;
291
292 /// Hint that pairing the given load or store is unprofitable.
293 static void suppressLdStPair(MachineInstr &MI);
294
295 std::optional<ExtAddrMode>
297 const TargetRegisterInfo *TRI) const override;
298
300 const MachineInstr &AddrI,
301 ExtAddrMode &AM) const override;
302
304 const ExtAddrMode &AM) const override;
305
308 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
309 const TargetRegisterInfo *TRI) const override;
310
311 /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
312 /// This is true for some SVE instructions like ldr/str that have a
313 /// 'reg + imm' addressing mode where the immediate is an index to the
314 /// scalable vector located at 'reg + imm * vscale x #bytes'.
316 const MachineOperand *&BaseOp,
317 int64_t &Offset, bool &OffsetIsScalable,
318 TypeSize &Width,
319 const TargetRegisterInfo *TRI) const;
320
321 /// Return the immediate offset of the base register in a load/store \p LdSt.
323
324 /// Returns true if opcode \p Opc is a memory operation. If it is, set
325 /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
326 ///
327 /// For unscaled instructions, \p Scale is set to 1. All values are in bytes.
328 /// MinOffset/MaxOffset are the un-scaled limits of the immediate in the
329 /// instruction, the actual offset limit is [MinOffset*Scale,
330 /// MaxOffset*Scale].
331 static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
332 int64_t &MinOffset, int64_t &MaxOffset);
333
335 int64_t Offset1, bool OffsetIsScalable1,
337 int64_t Offset2, bool OffsetIsScalable2,
338 unsigned ClusterSize,
339 unsigned NumBytes) const override;
340
342 const DebugLoc &DL, MCRegister DestReg,
343 MCRegister SrcReg, bool KillSrc, unsigned Opcode,
344 llvm::ArrayRef<unsigned> Indices) const;
346 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
347 bool KillSrc, unsigned Opcode, unsigned ZeroReg,
348 llvm::ArrayRef<unsigned> Indices) const;
350 const DebugLoc &DL, Register DestReg, Register SrcReg,
351 bool KillSrc, bool RenamableDest = false,
352 bool RenamableSrc = false) const override;
353
356 bool isKill, int FrameIndex, const TargetRegisterClass *RC,
357 const TargetRegisterInfo *TRI, Register VReg,
358 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
359
362 Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
363 const TargetRegisterInfo *TRI, Register VReg,
364 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
365
366 // This tells target independent code that it is okay to pass instructions
367 // with subreg operands to foldMemoryOperandImpl.
368 bool isSubregFoldable() const override { return true; }
369
374 MachineBasicBlock::iterator InsertPt, int FrameIndex,
375 LiveIntervals *LIS = nullptr,
376 VirtRegMap *VRM = nullptr) const override;
377
378 /// \returns true if a branch from an instruction with opcode \p BranchOpc
379 /// bytes is capable of jumping to a position \p BrOffset bytes away.
380 bool isBranchOffsetInRange(unsigned BranchOpc,
381 int64_t BrOffset) const override;
382
383 MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
384
386 MachineBasicBlock &NewDestBB,
387 MachineBasicBlock &RestoreBB, const DebugLoc &DL,
388 int64_t BrOffset, RegScavenger *RS) const override;
389
391 MachineBasicBlock *&FBB,
393 bool AllowModify = false) const override;
395 MachineBranchPredicate &MBP,
396 bool AllowModify) const override;
398 int *BytesRemoved = nullptr) const override;
401 const DebugLoc &DL,
402 int *BytesAdded = nullptr) const override;
403
404 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
405 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
406
407 bool
410 Register, Register, Register, int &, int &,
411 int &) const override;
413 const DebugLoc &DL, Register DstReg,
415 Register FalseReg) const override;
416
418 MachineBasicBlock::iterator MI) const override;
419
420 MCInst getNop() const override;
421
423 const MachineBasicBlock *MBB,
424 const MachineFunction &MF) const override;
425
426 /// analyzeCompare - For a comparison instruction, return the source registers
427 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
428 /// Return true if the comparison instruction can be analyzed.
429 bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
430 Register &SrcReg2, int64_t &CmpMask,
431 int64_t &CmpValue) const override;
432 /// optimizeCompareInstr - Convert the instruction supplying the argument to
433 /// the comparison into one that sets the zero bit in the flags register.
434 bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
435 Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
436 const MachineRegisterInfo *MRI) const override;
437 bool optimizeCondBranch(MachineInstr &MI) const override;
438
439 CombinerObjective getCombinerObjective(unsigned Pattern) const override;
440 /// Return true when a code sequence can improve throughput. It
441 /// should be called only for instructions in loops.
442 /// \param Pattern - combiner pattern
443 bool isThroughputPattern(unsigned Pattern) const override;
444 /// Return true when there is potentially a faster code sequence
445 /// for an instruction chain ending in ``Root``. All potential patterns are
446 /// listed in the ``Patterns`` array.
449 bool DoRegPressureReduce) const override;
450 /// Return true when Inst is associative and commutative so that it can be
451 /// reassociated. If Invert is true, then the inverse of Inst operation must
452 /// be checked.
454 bool Invert) const override;
455
456 /// Returns true if \P Opcode is an instruction which performs accumulation
457 /// into a destination register.
458 bool isAccumulationOpcode(unsigned Opcode) const override;
459
460 /// Returns an opcode which defines the accumulator used by \P Opcode.
461 unsigned getAccumulationStartOpcode(unsigned Opcode) const override;
462
463 unsigned
464 getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override;
465
466 /// When getMachineCombinerPatterns() finds patterns, this function
467 /// generates the instructions that could replace the original code
468 /// sequence
470 MachineInstr &Root, unsigned Pattern,
473 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const override;
474 /// AArch64 supports MachineCombiner.
475 bool useMachineCombiner() const override;
476
477 bool expandPostRAPseudo(MachineInstr &MI) const override;
478
479 std::pair<unsigned, unsigned>
480 decomposeMachineOperandsTargetFlags(unsigned TF) const override;
487
489 bool OutlineFromLinkOnceODRs) const override;
490 std::optional<std::unique_ptr<outliner::OutlinedFunction>>
492 const MachineModuleInfo &MMI,
493 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
494 unsigned MinRepeats) const override;
496 Function &F, std::vector<outliner::Candidate> &Candidates) const override;
499 unsigned Flags) const override;
501 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
502 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
504 const outliner::OutlinedFunction &OF) const override;
508 outliner::Candidate &C) const override;
510
513 bool AllowSideEffects = true) const override;
514
515 /// Returns the vector element size (B, H, S or D) of an SVE opcode.
516 uint64_t getElementSizeForOpcode(unsigned Opc) const;
517 /// Returns true if the opcode is for an SVE instruction that sets the
518 /// condition codes as if it's results had been fed to a PTEST instruction
519 /// along with the same general predicate.
520 bool isPTestLikeOpcode(unsigned Opc) const;
521 /// Returns true if the opcode is for an SVE WHILE## instruction.
522 bool isWhileOpcode(unsigned Opc) const;
523 /// Returns true if the instruction has a shift by immediate that can be
524 /// executed in one cycle less.
525 static bool isFalkorShiftExtFast(const MachineInstr &MI);
526 /// Return true if the instructions is a SEH instruction used for unwinding
527 /// on Windows.
528 static bool isSEHInstruction(const MachineInstr &MI);
529
530 std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
531 Register Reg) const override;
532
533 bool isFunctionSafeToSplit(const MachineFunction &MF) const override;
534
535 bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;
536
537 std::optional<ParamLoadedValue>
538 describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
539
540 unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;
541
543 MachineRegisterInfo &MRI) const override;
544
546 int64_t &NumBytes,
547 int64_t &NumPredicateVectors,
548 int64_t &NumDataVectors);
550 int64_t &ByteSized,
551 int64_t &VGSized);
552
553 // Return true if address of the form BaseReg + Scale * ScaledReg + Offset can
554 // be used for a load/store of NumBytes. BaseReg is always present and
555 // implicit.
556 bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
557 unsigned Scale) const;
558
559 // Decrement the SP, issuing probes along the way. `TargetReg` is the new top
560 // of the stack. `FrameSetup` is passed as true, if the allocation is a part
561 // of constructing the activation frame of a function.
563 Register TargetReg,
564 bool FrameSetup) const;
565
566#define GET_INSTRINFO_HELPER_DECLS
567#include "AArch64GenInstrInfo.inc"
568
569protected:
570 /// If the specific machine instruction is an instruction that moves/copies
571 /// value from one register to another register return destination and source
572 /// registers as machine operands.
573 std::optional<DestSourcePair>
574 isCopyInstrImpl(const MachineInstr &MI) const override;
575 std::optional<DestSourcePair>
576 isCopyLikeInstrImpl(const MachineInstr &MI) const override;
577
578private:
579 unsigned getInstBundleLength(const MachineInstr &MI) const;
580
581 /// Sets the offsets on outlined instructions in \p MBB which use SP
582 /// so that they will be valid post-outlining.
583 ///
584 /// \param MBB A \p MachineBasicBlock in an outlined function.
585 void fixupPostOutline(MachineBasicBlock &MBB) const;
586
587 void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
588 MachineBasicBlock *TBB,
589 ArrayRef<MachineOperand> Cond) const;
590 bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
591 const MachineRegisterInfo &MRI) const;
592 bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
593 int CmpValue, const MachineRegisterInfo &MRI) const;
594
595 /// Returns an unused general-purpose register which can be used for
596 /// constructing an outlined call if one exists. Returns 0 otherwise.
597 Register findRegisterToSaveLRTo(outliner::Candidate &C) const;
598
599 /// Remove a ptest of a predicate-generating operation that already sets, or
600 /// can be made to set, the condition codes in an identical manner
601 bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
602 unsigned PredReg,
603 const MachineRegisterInfo *MRI) const;
604 std::optional<unsigned>
605 canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
606 MachineInstr *Pred, const MachineRegisterInfo *MRI) const;
607
608 /// verifyInstruction - Perform target specific instruction verification.
609 bool verifyInstruction(const MachineInstr &MI,
610 StringRef &ErrInfo) const override;
611};
612
/// Records which of the four NZCV condition flags are read by a sequence of
/// instructions. Each member is true when the corresponding flag is used.
struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  /// Merge another usage record into this one: a flag is considered used if
  /// either record marks it used. Returns *this to allow chaining.
  UsedNZCV &operator|=(const UsedNZCV &Other) {
    N |= Other.N;
    Z |= Other.Z;
    C |= Other.C;
    V |= Other.V;
    return *this;
  }
};
629
630/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
631/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
632/// \returns std::nullopt otherwise.
633///
634/// Collect instructions using that flags in \p CCUseInstrs if provided.
635std::optional<UsedNZCV>
636examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
637 const TargetRegisterInfo &TRI,
638 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
639
640/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
641/// which either reads or clobbers NZCV.
642bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
643 const MachineInstr &UseMI,
644 const TargetRegisterInfo *TRI);
645
646MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
647 unsigned Reg, const StackOffset &Offset,
648 bool LastAdjustmentWasScalable = true);
649MCCFIInstruction
650createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
651 const StackOffset &OffsetFromDefCFA,
652 std::optional<int64_t> IncomingVGOffsetFromDefCFA);
653
654/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
655/// plus Offset. This is intended to be used from within the prolog/epilog
656/// insertion (PEI) pass, where a virtual scratch register may be allocated
657/// if necessary, to be replaced by the scavenger at the end of PEI.
658void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
659 const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
660 StackOffset Offset, const TargetInstrInfo *TII,
662 bool SetNZCV = false, bool NeedsWinCFI = false,
663 bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
664 StackOffset InitialOffset = {},
665 unsigned FrameReg = AArch64::SP);
666
667/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
668/// FP. Return false if the offset could not be handled directly in MI, and
669/// return the left-over portion by reference.
670bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
671 unsigned FrameReg, StackOffset &Offset,
672 const AArch64InstrInfo *TII);
673
674/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
676 AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
677 AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
678 AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
680
681/// Check if the @p Offset is a valid frame offset for @p MI.
682/// The returned value reports the validity of the frame offset for @p MI.
683/// It uses the values defined by AArch64FrameOffsetStatus for that.
684/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
685/// use an offset.
686/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
687/// rewritten in @p MI.
688/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
689/// amount that is off the limit of the legal offset.
690/// If set, @p OutUseUnscaledOp will contain the whether @p MI should be
691/// turned into an unscaled operator, which opcode is in @p OutUnscaledOp.
692/// If set, @p EmittableOffset contains the amount that can be set in @p MI
693/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
694/// is a legal offset.
695int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
696 bool *OutUseUnscaledOp = nullptr,
697 unsigned *OutUnscaledOp = nullptr,
698 int64_t *EmittableOffset = nullptr);
699
700static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
701
702static inline bool isCondBranchOpcode(int Opc) {
703 switch (Opc) {
704 case AArch64::Bcc:
705 case AArch64::CBZW:
706 case AArch64::CBZX:
707 case AArch64::CBNZW:
708 case AArch64::CBNZX:
709 case AArch64::TBZW:
710 case AArch64::TBZX:
711 case AArch64::TBNZW:
712 case AArch64::TBNZX:
713 case AArch64::CBWPri:
714 case AArch64::CBXPri:
715 case AArch64::CBWPrr:
716 case AArch64::CBXPrr:
717 return true;
718 default:
719 return false;
720 }
721}
722
723static inline bool isIndirectBranchOpcode(int Opc) {
724 switch (Opc) {
725 case AArch64::BR:
726 case AArch64::BRAA:
727 case AArch64::BRAB:
728 case AArch64::BRAAZ:
729 case AArch64::BRABZ:
730 return true;
731 }
732 return false;
733}
734
735static inline bool isIndirectCallOpcode(unsigned Opc) {
736 switch (Opc) {
737 case AArch64::BLR:
738 case AArch64::BLRAA:
739 case AArch64::BLRAB:
740 case AArch64::BLRAAZ:
741 case AArch64::BLRABZ:
742 return true;
743 default:
744 return false;
745 }
746}
747
748static inline bool isPTrueOpcode(unsigned Opc) {
749 switch (Opc) {
750 case AArch64::PTRUE_B:
751 case AArch64::PTRUE_H:
752 case AArch64::PTRUE_S:
753 case AArch64::PTRUE_D:
754 return true;
755 default:
756 return false;
757 }
758}
759
760/// Return opcode to be used for indirect calls.
761unsigned getBLRCallOpcode(const MachineFunction &MF);
762
763/// Return XPAC opcode to be used for a ptrauth strip using the given key.
764static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
765 using namespace AArch64PACKey;
766 switch (K) {
767 case IA: case IB: return AArch64::XPACI;
768 case DA: case DB: return AArch64::XPACD;
769 }
770 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
771}
772
773/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
774/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
775static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
776 using namespace AArch64PACKey;
777 switch (K) {
778 case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
779 case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
780 case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
781 case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
782 }
783 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
784}
785
786/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
787/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
788static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
789 using namespace AArch64PACKey;
790 switch (K) {
791 case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
792 case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
793 case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
794 case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
795 }
796 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
797}
798
799// struct TSFlags {
800#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X) // 3-bits
801#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
802#define TSFLAG_FALSE_LANE_TYPE(X) ((X) << 7) // 2-bits
803#define TSFLAG_INSTR_FLAGS(X) ((X) << 9) // 2-bits
804#define TSFLAG_SME_MATRIX_TYPE(X) ((X) << 11) // 3-bits
805// }
806
807namespace AArch64 {
808
816};
817
831};
832
837};
838
839// NOTE: This is a bit field.
842
852};
853
854#undef TSFLAG_ELEMENT_SIZE_TYPE
855#undef TSFLAG_DESTRUCTIVE_INST_TYPE
856#undef TSFLAG_FALSE_LANE_TYPE
857#undef TSFLAG_INSTR_FLAGS
858#undef TSFLAG_SME_MATRIX_TYPE
859
863
865}
866
867} // end namespace llvm
868
869#endif
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X)
#define TSFLAG_SME_MATRIX_TYPE(X)
#define TSFLAG_FALSE_LANE_TYPE(X)
#define TSFLAG_INSTR_FLAGS(X)
#define TSFLAG_ELEMENT_SIZE_TYPE(X)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Register Reg
Register const TargetRegisterInfo * TRI
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isHForm(const MachineInstr &MI)
Returns whether the instruction is in H form (16 bit operands)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
static bool hasBTISemantics(const MachineInstr &MI)
Returns whether the instruction can be compatible with non-zero BTYPE.
static bool isQForm(const MachineInstr &MI)
Returns whether the instruction is in Q form (128 bit operands)
static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors, int64_t &NumDataVectors)
Returns the offset in parts to which this frame offset can be decomposed for the purpose of describin...
static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width, int64_t &MinOffset, int64_t &MaxOffset)
Returns true if opcode Opc is a memory operation.
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isFPRCopy(const MachineInstr &MI)
Does this instruction rename an FPR without modifying bits?
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
static int getMemScale(const MachineInstr &MI)
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool isSubregFoldable() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
uint64_t getElementSizeForOpcode(unsigned Opc) const
Returns the vector element size (B, H, S or D) of an SVE opcode.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
static bool isGPRCopy(const MachineInstr &MI)
Does this instruction rename a GPR without modifying bits?
static unsigned convertToFlagSettingOpc(unsigned Opc)
Return the opcode that set flags when possible.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
static const MachineOperand & getLdStOffsetOp(const MachineInstr &MI)
Returns the immediate offset operator of a load/store.
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
bool isWhileOpcode(unsigned Opc) const
Returns true if the opcode is for an SVE WHILE## instruction.
static std::optional< unsigned > getUnscaledLdSt(unsigned Opc)
Returns the unscaled load/store for the scaled load/store opcode, if there is a corresponding unscale...
static bool hasUnscaledLdStOffset(unsigned Opc)
Return true if it has an unscaled load/store offset.
static const MachineOperand & getLdStAmountOp(const MachineInstr &MI)
Returns the shift amount operator of a load/store.
static bool hasUnscaledLdStOffset(MachineInstr &MI)
static bool isPreLdSt(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load/store.
bool isAccumulationOpcode(unsigned Opcode) const override
Returns true if \P Opcode is an instruction which performs accumulation into a destination register.
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify) const override
static bool isSEHInstruction(const MachineInstr &MI)
Return true if the instructions is a SEH instruction used for unwinding on Windows.
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
SmallVector< std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > > getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override
static bool isPairableLdStInst(const MachineInstr &MI)
Return true if pairing the given load or store may be paired with another.
const AArch64RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
static bool isPreSt(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed store.
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
static bool isPairedLdSt(const MachineInstr &MI)
Returns whether the instruction is a paired load/store.
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool useMachineCombiner() const override
AArch64 supports MachineCombiner.
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const override
static bool isFalkorShiftExtFast(const MachineInstr &MI)
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, TypeSize &Width, const TargetRegisterInfo *TRI) const
If OffsetIsScalable is set to 'true', the offset is scaled by vscale.
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
unsigned getAccumulationStartOpcode(unsigned Opcode) const override
Returns an opcode which defines the accumulator used by \p Opcode.
static bool isStridedAccess(const MachineInstr &MI)
Return true if the given load or store is a strided memory access.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Detect opportunities for ldp/stp formation.
bool expandPostRAPseudo(MachineInstr &MI) const override
unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
bool isThroughputPattern(unsigned Pattern) const override
Return true when a code sequence can improve throughput.
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
MachineOperand & getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const
Return the immediate offset of the base register in a load/store LdSt.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
static bool isLdStPairSuppressed(const MachineInstr &MI)
Return true if pairing the given load or store is hinted to be unprofitable.
bool isFunctionSafeToSplit(const MachineFunction &MF) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
Return true when Inst is associative and commutative so that it can be reassociated.
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineBasicBlock::iterator probedStackAlloc(MachineBasicBlock::iterator MBBI, Register TargetReg, bool FrameSetup) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register.
static unsigned getLoadStoreImmIdx(unsigned Opc)
Returns the index for the immediate for a given instruction.
static bool isGPRZero(const MachineInstr &MI)
Does this instruction set its full destination register to zero?
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, unsigned ZeroReg, llvm::ArrayRef< unsigned > Indices) const
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2, and the value it compares against in CmpValue.
CombinerObjective getCombinerObjective(unsigned Pattern) const override
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset, unsigned Scale) const
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
std::optional< DestSourcePair > isCopyLikeInstrImpl(const MachineInstr &MI) const override
static void suppressLdStPair(MachineInstr &MI)
Hint that pairing the given load or store is unprofitable.
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isPreLd(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, llvm::ArrayRef< unsigned > Indices) const
bool optimizeCondBranch(MachineInstr &MI) const override
Replace csincr-branch sequence by simple conditional branch.
static int getMemScale(unsigned Opc)
Scaling factor for (scaled or unscaled) load or store.
bool isCandidateToMergeOrPair(const MachineInstr &MI) const
Return true if this is a load/store that can be potentially paired/merged.
MCInst getNop() const override
static const MachineOperand & getLdStBaseOp(const MachineInstr &MI)
Returns the base register operand of a load/store.
bool isPTestLikeOpcode(unsigned Opc) const
Returns true if the opcode is for an SVE instruction that sets the condition codes as if its results had been fed to a PTEST instruction along with the same general predicate.
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
A debug info location.
Definition: DebugLoc.h:124
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:188
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MachineInstrBundleIterator< MachineInstr > iterator
Representation of each machine instruction.
Definition: MachineInstr.h:72
Flags
Flags values. These may be or'd together.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:574
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:34
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
int getSVERevInstr(uint16_t Opcode)
int getSMEPseudoMap(uint16_t Opcode)
static const uint64_t InstrFlagIsWhile
static const uint64_t InstrFlagIsPTestLike
int getSVEPseudoMap(uint16_t Opcode)
int getSVENonRevInstr(uint16_t Opcode)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
static bool isCondBranchOpcode(int Opc)
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
static bool isPTrueOpcode(unsigned Opc)
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
static bool isIndirectBranchOpcode(int Opc)
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
AArch64FrameOffsetStatus
Use to report the frame offset status in isAArch64FrameOffsetLegal.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
static bool isIndirectCallOpcode(unsigned Opc)
AArch64MachineCombinerPattern
@ MULSUBv8i16_OP2
@ GATHER_LANE_i16
@ FMULv4i16_indexed_OP1
@ FMLSv1i32_indexed_OP2
@ MULSUBv2i32_indexed_OP1
@ MULADDXI_OP1
@ FMLAv2i32_indexed_OP2
@ MULADDv4i16_indexed_OP2
@ FMLAv1i64_indexed_OP1
@ MULSUBv16i8_OP1
@ FMLAv8i16_indexed_OP2
@ FMULv2i32_indexed_OP1
@ MULSUBv8i16_indexed_OP2
@ FMLAv1i64_indexed_OP2
@ MULSUBv4i16_indexed_OP2
@ FMLAv1i32_indexed_OP1
@ FMLAv2i64_indexed_OP2
@ FMLSv8i16_indexed_OP1
@ MULSUBv2i32_OP1
@ FMULv4i16_indexed_OP2
@ MULSUBv4i32_indexed_OP2
@ FMULv2i64_indexed_OP2
@ MULSUBXI_OP1
@ FMLAv4i32_indexed_OP1
@ MULADDWI_OP1
@ MULADDv4i16_OP2
@ FMULv8i16_indexed_OP2
@ MULSUBv4i16_OP1
@ MULADDv4i32_OP2
@ MULADDv8i8_OP1
@ MULADDv2i32_OP2
@ MULADDv16i8_OP2
@ MULADDv8i8_OP2
@ FMLSv4i16_indexed_OP1
@ MULADDv16i8_OP1
@ FMLAv2i64_indexed_OP1
@ FMLAv1i32_indexed_OP2
@ FMLSv2i64_indexed_OP2
@ MULADDv2i32_OP1
@ MULADDv4i32_OP1
@ MULADDv2i32_indexed_OP1
@ MULSUBv16i8_OP2
@ MULADDv4i32_indexed_OP1
@ MULADDv2i32_indexed_OP2
@ FMLAv4i16_indexed_OP2
@ MULSUBv8i16_OP1
@ FMULv2i32_indexed_OP2
@ GATHER_LANE_i8
@ FMLSv2i32_indexed_OP2
@ FMLSv4i32_indexed_OP1
@ FMULv2i64_indexed_OP1
@ MULSUBv4i16_OP2
@ FMLSv4i16_indexed_OP2
@ FMLAv2i32_indexed_OP1
@ GATHER_LANE_i32
@ FMLSv2i32_indexed_OP1
@ FMLAv8i16_indexed_OP1
@ MULSUBv4i16_indexed_OP1
@ FMLSv4i32_indexed_OP2
@ MULADDv4i32_indexed_OP2
@ MULSUBv4i32_OP2
@ MULSUBv8i16_indexed_OP1
@ MULADDv8i16_OP2
@ MULSUBv2i32_indexed_OP2
@ FMULv4i32_indexed_OP2
@ FMLSv2i64_indexed_OP1
@ MULADDv4i16_OP1
@ FMLAv4i32_indexed_OP2
@ MULADDv8i16_indexed_OP1
@ FMULv4i32_indexed_OP1
@ FMLAv4i16_indexed_OP1
@ FMULv8i16_indexed_OP1
@ MULSUBv8i8_OP1
@ MULADDv8i16_OP1
@ MULSUBv4i32_indexed_OP1
@ MULSUBv4i32_OP1
@ FMLSv8i16_indexed_OP2
@ MULADDv8i16_indexed_OP2
@ MULSUBWI_OP1
@ MULSUBv2i32_OP2
@ FMLSv1i64_indexed_OP2
@ MULADDv4i16_indexed_OP1
@ MULSUBv8i8_OP2
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
std::optional< UsedNZCV > examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr, const TargetRegisterInfo &TRI, SmallVectorImpl< MachineInstr * > *CCUseInstrs=nullptr)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:82
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA, std::optional< int64_t > IncomingVGOffsetFromDefCFA)
static bool isUncondBranchOpcode(int Opc)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn't take a discriminator operand, using zero instead.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static const MachineMemOperand::Flags MOSuppressPair
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI, const MachineInstr &UseMI, const TargetRegisterInfo *TRI)
Return true if there is an instruction /after/ DefMI and before UseMI which either reads or clobbers NZCV.
static const MachineMemOperand::Flags MOStridedAccess
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn't take a discriminator operand, using zero instead.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
UsedNZCV & operator|=(const UsedNZCV &UsedFlags)
UsedNZCV()=default
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.