LLVM 21.0.0git
AArch64ISelLowering.h
Go to the documentation of this file.
1//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that AArch64 uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
15#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16
21#include "llvm/IR/CallingConv.h"
22#include "llvm/IR/Instruction.h"
23
24namespace llvm {
25
26namespace AArch64ISD {
27
28// For predicated nodes where the result is a vector, the operation is
29// controlled by a governing predicate and the inactive lanes are explicitly
30// defined with a value, please stick the following naming convention:
31//
32// _MERGE_OP<n> The result value is a vector with inactive lanes equal
33// to source operand OP<n>.
34//
35// _MERGE_ZERO The result value is a vector with inactive lanes
36// actively zeroed.
37//
38// _MERGE_PASSTHRU The result value is a vector with inactive lanes equal
// to the last source operand, whose only purpose is being
40// a passthru value.
41//
42// For other cases where no explicit action is needed to set the inactive lanes,
43// or when the result is not a vector and it is needed or helpful to
44// distinguish a node from similar unpredicated nodes, use:
45//
46// _PRED
47//
48enum NodeType : unsigned {
50 WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
51 CALL, // Function call.
52
53 // Pseudo for a OBJC call that gets emitted together with a special `mov
54 // x29, x29` marker instruction.
56
57 CALL_BTI, // Function call followed by a BTI instruction.
58
59 // Function call, authenticating the callee value first:
60 // AUTH_CALL chain, callee, auth key #, int disc, addr disc, operands.
62 // AUTH_TC_RETURN chain, callee, fpdiff, auth key #, int disc, addr disc,
63 // operands.
65
66 // Authenticated variant of CALL_RVMARKER.
68
70
73
79
80 // A call with the callee in x16, i.e. "blr x16".
82
83 // Produces the full sequence of instructions for getting the thread pointer
84 // offset of a variable into X0, using the TLSDesc model.
87 ADRP, // Page address of a TargetGlobalAddress operand.
88 ADR, // ADR
89 ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
90 LOADgot, // Load from automatically generated descriptor (e.g. Global
91 // Offset Table, TLS record).
92 RET_GLUE, // Return with a glue operand. Operand 0 is the chain operand.
93 BRCOND, // Conditional branch instruction; "b.cond".
95 CSINV, // Conditional select invert.
96 CSNEG, // Conditional select negate.
97 CSINC, // Conditional select increment.
98
99 // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
100 // ELF.
103 SBC, // adc, sbc instructions
104
105 // To avoid stack clash, allocation is performed by block and each block is
106 // probed.
108
109 // Predicated instructions where inactive lanes produce undefined results.
137
138 // Unpredicated vector instructions
140
142
143 // Predicated instructions with the result of inactive lanes provided by the
144 // last operand.
167
169
170 // Arithmetic instructions which write flags.
176
177 // Conditional compares. Operands: left,right,falsecc,cc,flags
181
182 // Floating point comparison
184
185 // Scalar-to-vector duplication
192
// Vector immediate moves
201
202 // Vector immediate ops
205
206 // Vector bitwise select: similar to ISD::VSELECT but not all bits within an
207 // element must be identical.
209
210 // Vector shuffles
222
223 // Vector shift by scalar
227
228 // Vector shift by scalar (again)
235
236 // Vector narrowing shift by immediate (bottom)
238
239 // Vector shift by constant and insert
242
243 // Vector comparisons
252
253 // Vector zero comparisons
264
265 // Round wide FP to narrow FP with inexact results to odd.
267
268 // Vector across-lanes addition
269 // Only the lower result lane is defined.
272
273 // Unsigned sum Long across Vector
276
277 // Wide adds
282
283 // Add Pairwise of two vectors
285 // Add Long Pairwise
288
289 // udot/sdot/usdot instructions
293
294 // Vector across-lanes min/max
295 // Only the lower result lane is defined.
300
310
311 // Compare-and-branch
316
317 // Tail calls
319
320 // Custom prefetch handling
322
323 // {s|u}int to FP within a FP register.
326
327 /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
328 /// world w.r.t vectors; which causes additional REV instructions to be
329 /// generated to compensate for the byte-swapping. But sometimes we do
330 /// need to re-interpret the data in SIMD vector registers in big-endian
331 /// mode without emitting such REV instructions.
333
334 MRS, // MRS, also sets the flags via a glue.
335
338
340
341 // Reciprocal estimates and steps.
346
351
357
358 // Floating-point reductions.
365
370
372
381
382 // Cast between vectors of the same element type but differ in length.
384
385 // Nodes to build an LD64B / ST64B 64-bit quantity out of i64, and vice versa
388
397
398 // Structured loads.
402
403 // Unsigned gather loads.
413
414 // Signed gather loads
422
423 // Unsigned gather loads.
431
432 // Signed gather loads.
440
441 // Non-temporal gather loads
445
446 // Contiguous masked store.
448
449 // Scatter store
459
460 // Non-temporal scatter store
463
464 // SME
469
470 // Needed for __arm_agnostic("sme_za_state")
473
474 // Asserts that a function argument (i32) is zero-extended to i8 by
475 // the caller
477
478 // 128-bit system register accesses
479 // lo64, hi64, chain = MRRS(chain, sysregname)
481 // chain = MSRR(chain, sysregname, lo64, hi64)
483
484 // Strict (exception-raising) floating point comparison
489
490 // NEON Load/Store with post-increment base updates
515
520
528
529 // SME ZA loads and stores
532};
533
534} // end namespace AArch64ISD
535
536namespace AArch64 {
537/// Possible values of current rounding mode, which is specified in bits
538/// 23:22 of FPCR.
540 RN = 0, // Round to Nearest
541 RP = 1, // Round towards Plus infinity
542 RM = 2, // Round towards Minus infinity
543 RZ = 3, // Round towards Zero
544 rmMask = 3 // Bit mask selecting rounding mode
546
// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;

// Reserved bits should be preserved when modifying FPCR.
// NOTE(review): this mask presumably covers everything outside the writable
// FPCR fields (rounding mode, exception enables, etc.) — confirm against the
// Arm ARM FPCR register layout before relying on exact bit positions.
const uint64_t ReservedFPControlBits = 0xfffffffff80040f8;
552
553// Registers used to pass function arguments.
556
557/// Maximum allowed number of unprobed bytes above SP at an ABI
558/// boundary.
559const unsigned StackProbeMaxUnprobedStack = 1024;
560
561/// Maximum number of iterations to unroll for a constant size probing loop.
562const unsigned StackProbeMaxLoopUnroll = 4;
563
564} // namespace AArch64
565
namespace ARM64AS {
// Address-space numbers used to model pointer width. PTR32_SPTR and
// PTR32_UPTR tag 32-bit pointers created via the `__ptr32` extension or
// similar (see getPointerTy below); PTR64 is an explicit 64-bit pointer.
// NOTE(review): the S/U prefixes presumably denote sign- vs zero-extension
// when widened to 64 bits — confirm against the lowering code.
enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
}
569
570class AArch64Subtarget;
571
573public:
574 explicit AArch64TargetLowering(const TargetMachine &TM,
575 const AArch64Subtarget &STI);
576
577 /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
578 /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
580 SDValue N1) const override;
581
582 /// Selects the correct CCAssignFn for a given CallingConvention value.
583 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
584
585 /// Selects the correct CCAssignFn for a given CallingConvention value.
587
588 /// Determine which of the bits specified in Mask are known to be either zero
589 /// or one and return them in the KnownZero/KnownOne bitsets.
591 const APInt &DemandedElts,
592 const SelectionDAG &DAG,
593 unsigned Depth = 0) const override;
594
596 const APInt &DemandedElts,
597 const SelectionDAG &DAG,
598 unsigned Depth) const override;
599
600 MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
601 if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
602 // These are 32-bit pointers created using the `__ptr32` extension or
603 // similar. They are handled by marking them as being in a different
604 // address space, and will be extended to 64-bits when used as the target
605 // of a load or store operation, or cast to a 64-bit pointer type.
606 return MVT::i32;
607 } else {
608 // Returning i64 unconditionally here (i.e. even for ILP32) means that the
609 // *DAG* representation of pointers will always be 64-bits. They will be
610 // truncated and extended when transferred to memory, but the 64-bit DAG
611 // allows us to use AArch64's addressing modes much more easily.
612 return MVT::i64;
613 }
614 }
615
617 const APInt &DemandedElts,
618 TargetLoweringOpt &TLO) const override;
619
620 MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
621
622 /// Returns true if the target allows unaligned memory accesses of the
623 /// specified type.
625 EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
627 unsigned *Fast = nullptr) const override;
628 /// LLT variant.
629 bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
630 Align Alignment,
632 unsigned *Fast = nullptr) const override;
633
634 /// Provide custom lowering hooks for some operations.
635 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
636
637 const char *getTargetNodeName(unsigned Opcode) const override;
638
639 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
640
641 /// This method returns a target specific FastISel object, or null if the
642 /// target does not support "fast" ISel.
644 const TargetLibraryInfo *libInfo) const override;
645
646 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
647
648 bool isFPImmLegal(const APFloat &Imm, EVT VT,
649 bool ForCodeSize) const override;
650
651 /// Return true if the given shuffle mask can be codegen'd directly, or if it
652 /// should be stack expanded.
653 bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
654
655 /// Similar to isShuffleMaskLegal. Return true is the given 'select with zero'
656 /// shuffle mask can be codegen'd directly.
657 bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;
658
659 /// Return the ISD::SETCC ValueType.
661 EVT VT) const override;
662
664
666 MachineBasicBlock *BB) const;
667
669 MachineBasicBlock *BB) const;
670
672 MachineBasicBlock *MBB) const;
673
674 MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
676 MachineBasicBlock *BB) const;
678 MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
679 MachineInstr &MI, MachineBasicBlock *BB) const;
681 unsigned Opcode, bool Op0IsDef) const;
684 MachineBasicBlock *BB) const;
686 MachineBasicBlock *BB) const;
688 MachineBasicBlock *BB) const;
690 MachineBasicBlock *BB) const;
691
694 MachineBasicBlock *MBB) const override;
695
696 bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
697 MachineFunction &MF,
698 unsigned Intrinsic) const override;
699
701 EVT NewVT) const override;
702
703 bool shouldRemoveRedundantExtend(SDValue Op) const override;
704
705 bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
706 bool isTruncateFree(EVT VT1, EVT VT2) const override;
707
708 bool isProfitableToHoist(Instruction *I) const override;
709
710 bool isZExtFree(Type *Ty1, Type *Ty2) const override;
711 bool isZExtFree(EVT VT1, EVT VT2) const override;
712 bool isZExtFree(SDValue Val, EVT VT2) const override;
713
715 Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;
716
717 bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;
718
  // Up to 4-way interleaved accesses are supported (NOTE(review): presumably
  // matching the ld2/ld3/ld4 and st2/st3/st4 instruction families — confirm).
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
720
723 ArrayRef<unsigned> Indices,
724 unsigned Factor) const override;
726 unsigned Factor) const override;
727
729 LoadInst *LI, ArrayRef<Value *> DeinterleaveValues) const override;
730
732 StoreInst *SI, ArrayRef<Value *> InterleaveValues) const override;
733
734 bool isLegalAddImmediate(int64_t) const override;
735 bool isLegalAddScalableImmediate(int64_t) const override;
736 bool isLegalICmpImmediate(int64_t) const override;
737
739 SDValue ConstNode) const override;
740
741 bool shouldConsiderGEPOffsetSplit() const override;
742
744 const AttributeList &FuncAttributes) const override;
745
747 const AttributeList &FuncAttributes) const override;
748
749 /// Return true if the addressing mode represented by AM is legal for this
750 /// target, for a load/store of the specified type.
751 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
752 unsigned AS,
753 Instruction *I = nullptr) const override;
754
755 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
756 int64_t MaxOffset) const override;
757
758 /// Return true if an FMA operation is faster than a pair of fmul and fadd
759 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
760 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
762 EVT VT) const override;
763 bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;
764
766 CodeGenOptLevel OptLevel) const override;
767
768 /// Return true if the target has native support for
769 /// the specified value type and it is 'desirable' to use the type for the
770 /// given node type.
771 bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
772
773 const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
775
776 /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
778 CombineLevel Level) const override;
779
  // Never pull an extend out of a shift on AArch64; keep the combined form.
  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }
783
784 /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
785 bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
786
787 /// Return true if it is profitable to fold a pair of shifts into a mask.
789 CombineLevel Level) const override;
790
791 bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
792 EVT VT) const override;
793
794 /// Returns true if it is beneficial to convert a load of a constant
795 /// to just the constant itself.
797 Type *Ty) const override;
798
799 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
800 /// with this index.
801 bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
802 unsigned Index) const override;
803
  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64, so forward to the generic implementation with MathUsed forced
    // to true instead of passing the caller's value through.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }
810
811 Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
812 AtomicOrdering Ord) const override;
814 AtomicOrdering Ord) const override;
815
816 void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;
817
818 bool isOpSuitableForLDPSTP(const Instruction *I) const;
819 bool isOpSuitableForLSE128(const Instruction *I) const;
820 bool isOpSuitableForRCPC3(const Instruction *I) const;
821 bool shouldInsertFencesForAtomic(const Instruction *I) const override;
822 bool
824
826 shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
828 shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
830 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
831
834
835 bool useLoadStackGuardNode(const Module &M) const override;
837 getPreferredVectorAction(MVT VT) const override;
838
839 /// If the target has a standard location for the stack protector cookie,
840 /// returns the address of that location. Otherwise, returns nullptr.
841 Value *getIRStackGuard(IRBuilderBase &IRB) const override;
842
843 void insertSSPDeclarations(Module &M) const override;
844 Value *getSDagStackGuard(const Module &M) const override;
845 Function *getSSPStackGuardCheck(const Module &M) const override;
846
847 /// If the target has a standard location for the unsafe stack pointer,
848 /// returns the address of that location. Otherwise, returns nullptr.
849 Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;
850
851 /// If a physical register, this returns the register that receives the
852 /// exception address on entry to an EH pad.
854 getExceptionPointerRegister(const Constant *PersonalityFn) const override;
855
856 /// If a physical register, this returns the register that receives the
857 /// exception typeid on entry to a landing pad.
859 getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
860
861 bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
862
863 bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
864 const MachineFunction &MF) const override;
865
  // Count-trailing-zeros is always cheap to speculate on AArch64.
  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }
869
  // Count-leading-zeros is always cheap to speculate on AArch64.
  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }
873
874 bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
875
876 bool hasAndNotCompare(SDValue V) const override {
877 // We can use bics for any scalar.
878 return V.getValueType().isScalarInteger();
879 }
880
881 bool hasAndNot(SDValue Y) const override {
882 EVT VT = Y.getValueType();
883
884 if (!VT.isVector())
885 return hasAndNotCompare(Y);
886
887 TypeSize TS = VT.getSizeInBits();
888 // TODO: We should be able to use bic/bif too for SVE.
889 return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
890 }
891
894 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
895 SelectionDAG &DAG) const override;
896
899 unsigned ExpansionFactor) const override;
900
902 unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
904 if (XVT.isVector())
905 return false;
906
907 auto VTIsOk = [](EVT VT) -> bool {
908 return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
909 VT == MVT::i64;
910 };
911
912 // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
913 // XVT will be larger than KeptBitsVT.
914 MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
915 return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
916 }
917
918 bool preferIncOfAddToSubOfNot(EVT VT) const override;
919
920 bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
921
922 bool shouldExpandCmpUsingSelects(EVT VT) const override;
923
924 bool isComplexDeinterleavingSupported() const override;
926 ComplexDeinterleavingOperation Operation, Type *Ty) const override;
927
930 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
931 Value *Accumulator = nullptr) const override;
932
933 bool supportSplitCSR(MachineFunction *MF) const override {
935 MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
936 }
937 void initializeSplitCSR(MachineBasicBlock *Entry) const override;
939 MachineBasicBlock *Entry,
940 const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
941
  // Swift's swifterror argument convention is supported on this target.
  bool supportSwiftError() const override {
    return true;
  }
945
  // Pointer-authentication operand bundles on calls are supported.
  bool supportPtrAuthBundles() const override { return true; }
947
  // KCFI (kernel control-flow integrity) operand bundles are supported.
  bool supportKCFIBundles() const override { return true; }
949
952 const TargetInstrInfo *TII) const override;
953
954 /// Enable aggressive FMA fusion on targets that want it.
955 bool enableAggressiveFMAFusion(EVT VT) const override;
956
957 /// Returns the size of the platform's va_list object.
958 unsigned getVaListSizeInBits(const DataLayout &DL) const override;
959
960 /// Returns true if \p VecTy is a legal interleaved access type. This
961 /// function checks the vector element type and the overall width of the
962 /// vector.
964 bool &UseScalable) const;
965
966 /// Returns the number of interleaved accesses that will be generated when
967 /// lowering accesses of the given type.
968 unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
969 bool UseScalable) const;
970
972 const Instruction &I) const override;
973
975 Type *Ty, CallingConv::ID CallConv, bool isVarArg,
976 const DataLayout &DL) const override;
977
978 /// Used for exception handling on Win64.
979 bool needsFixedCatchObjects() const override;
980
981 bool fallBackToDAGISel(const Instruction &Inst) const override;
982
983 /// SVE code generation for fixed length vectors does not custom lower
984 /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
985 /// merge. However, merging them creates a BUILD_VECTOR that is just as
986 /// illegal as the original, thus leading to an infinite legalisation loop.
987 /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
988 /// vector types this override can be removed.
989 bool mergeStoresAfterLegalization(EVT VT) const override;
990
991 // If the platform/function should have a redzone, return the size in bytes.
992 unsigned getRedZoneSize(const Function &F) const {
993 if (F.hasFnAttribute(Attribute::NoRedZone))
994 return 0;
995 return 128;
996 }
997
998 bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;
1000
1002 bool AllowUnknown = false) const override;
1003
1004 bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;
1005
1006 bool
1008
1009 bool shouldExpandCttzElements(EVT VT) const override;
1010
1011 bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override;
1012
1013 /// If a change in streaming mode is required on entry to/return from a
1014 /// function call it emits and returns the corresponding SMSTART or SMSTOP
1015 /// node. \p Condition should be one of the enum values from
1016 /// AArch64SME::ToggleCondition.
1018 SDValue Chain, SDValue InGlue, unsigned Condition,
1019 SDValue PStateSM = SDValue()) const;
1020
  // Legal SVE vector lengths are powers of two, so vscale always is too.
  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }
1022
1023 // Normally SVE is only used for byte size vectors that do not fit within a
1024 // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
1025 // used for 64bit and 128bit vectors as well.
1026 bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
1027
1028 // Follow NEON ABI rules even when using SVE for fixed length vectors.
1030 EVT VT) const override;
1033 EVT VT) const override;
1036 EVT &IntermediateVT,
1037 unsigned &NumIntermediates,
1038 MVT &RegisterVT) const override;
1039
  /// True if stack clash protection is enabled for this function.
1041 bool hasInlineStackProbe(const MachineFunction &MF) const override;
1042
1043#ifndef NDEBUG
1044 void verifyTargetSDNode(const SDNode *N) const override;
1045#endif
1046
1047private:
1048 /// Keep a pointer to the AArch64Subtarget around so that we can
1049 /// make the right decision when generating code for different targets.
1050 const AArch64Subtarget *Subtarget;
1051
1052 llvm::BumpPtrAllocator BumpAlloc;
1053 llvm::StringSaver Saver{BumpAlloc};
1054
1055 bool isExtFreeImpl(const Instruction *Ext) const override;
1056
1057 void addTypeForNEON(MVT VT);
1058 void addTypeForFixedLengthSVE(MVT VT);
1059 void addDRType(MVT VT);
1060 void addQRType(MVT VT);
1061
1062 bool shouldExpandBuildVectorWithShuffles(EVT, unsigned) const override;
1063
1064 SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
1065 bool isVarArg,
1066 const SmallVectorImpl<ISD::InputArg> &Ins,
1067 const SDLoc &DL, SelectionDAG &DAG,
1068 SmallVectorImpl<SDValue> &InVals) const override;
1069
1070 void AdjustInstrPostInstrSelection(MachineInstr &MI,
1071 SDNode *Node) const override;
1072
1073 SDValue LowerCall(CallLoweringInfo & /*CLI*/,
1074 SmallVectorImpl<SDValue> &InVals) const override;
1075
1076 SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
1077 CallingConv::ID CallConv, bool isVarArg,
1078 const SmallVectorImpl<CCValAssign> &RVLocs,
1079 const SDLoc &DL, SelectionDAG &DAG,
1080 SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
1081 SDValue ThisVal, bool RequiresSMChange) const;
1082
1083 SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
1084 SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
1085 SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
1086 SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
1087
1088 SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
1089 SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
1090
1091 SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
1092
1093 SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;
1094
1095 SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1096 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1097 SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
1098
1099 bool
1100 isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;
1101
1102 /// Finds the incoming stack arguments which overlap the given fixed stack
1103 /// object and incorporates their load into the current chain. This prevents
1104 /// an upcoming store from clobbering the stack argument before it's used.
1105 SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
1106 MachineFrameInfo &MFI, int ClobberedFI) const;
1107
1108 bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
1109
1110 void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
1111 SDValue &Chain) const;
1112
1113 bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1114 bool isVarArg,
1115 const SmallVectorImpl<ISD::OutputArg> &Outs,
1116 LLVMContext &Context, const Type *RetTy) const override;
1117
1118 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1119 const SmallVectorImpl<ISD::OutputArg> &Outs,
1120 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1121 SelectionDAG &DAG) const override;
1122
1123 SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
1124 unsigned Flag) const;
1125 SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
1126 unsigned Flag) const;
1127 SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
1128 unsigned Flag) const;
1129 SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
1130 unsigned Flag) const;
1131 SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
1132 unsigned Flag) const;
1133 template <class NodeTy>
1134 SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
1135 template <class NodeTy>
1136 SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
1137 template <class NodeTy>
1138 SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
1139 template <class NodeTy>
1140 SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
1141 SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
1142 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
1143 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
1144 SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
1145 SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
1146 SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
1147 const SDLoc &DL, SelectionDAG &DAG) const;
1148 SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
1149 SelectionDAG &DAG) const;
1150 SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
1151 SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
1152 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
1153 SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
1154 SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
1155 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
1156 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
1157 SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
1158 SDValue TVal, SDValue FVal, const SDLoc &dl,
1159 SelectionDAG &DAG) const;
1160 SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
1161 SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
1162 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
1163 SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
1164 SDValue LowerBRIND(SDValue Op, SelectionDAG &DAG) const;
1165 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
1166 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
1167 SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
1168 SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
1169 SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
1170 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
1171 SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
1172 SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
1173 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
1174 SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
1175 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
1176 SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
1177 SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
1178 SDValue LowerGET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
1179 SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
1180 SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
1181 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
1182 SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
1183 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
1184 SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
1185 SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
1186 SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  // Operation-specific custom lowering hooks, dispatched from the target's
  // LowerOperation() entry point. Each takes the node to replace plus the
  // owning DAG and returns the lowered value (or an empty SDValue to let the
  // generic legalizer handle it).
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  // Lowers Op to the equivalent predicated node with opcode NewOp.
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_HISTOGRAM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  // Lowers an averaging node; NewOp selects the specific target opcode to
  // emit for Op.
  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  // Lowering of fixed-length vector operations onto their scalable (SVE)
  // equivalents, used when fixed-length vectors are code-generated with SVE
  // (see useSVEForFixedLengthVectorVT).
  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;
  // Custom expansions of signed division/remainder by a power-of-2 divisor;
  // any nodes created during expansion are recorded in Created.
  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  // Estimate hooks for sqrt/reciprocal, used when iterative refinement is
  // preferred over a full-precision instruction (ExtraSteps reports/limits
  // the number of Newton-Raphson iterations).
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;
1275
  /// Classify an inline-asm constraint string for this target.
  ConstraintType getConstraintType(StringRef Constraint) const override;
  /// Resolve a register name (as used by the read/write_register intrinsics)
  /// to a physical register for this function.
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;
1279
1280 /// Examine constraint string and operand type and determine a weight value.
1281 /// The operand object must already have been set up with the operand type.
1283 getSingleConstraintMatchWeight(AsmOperandInfo &info,
1284 const char *constraint) const override;
1285
  /// Map an inline-asm register constraint to a (register, register class)
  /// pair for operands of type VT.
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  /// Replace the generic 'X' (anything) constraint with a more specific one
  /// appropriate for ConstraintVT.
  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// Lower the operand Op for the given inline-asm constraint, appending the
  /// resulting operands to Ops.
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
1295
1297 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
1298 if (ConstraintCode == "Q")
1300 // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
1301 // followed by llvm_unreachable so we'll leave them unimplemented in
1302 // the backend for now.
1303 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
1304 }
1305
  /// Handle Lowering flag assembly outputs.
  /// Produces the SDValue for an inline-asm flag output constraint, updating
  /// Chain as needed.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;
1311
  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  // Shared helper for the pre-/post-indexed addressing hooks: extracts the
  // base and offset of a candidate indexed addressing form.
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
1319 bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
1321 SelectionDAG &DAG) const override;
  /// Returns true (filling in Base/Offset/AM) if node N can be combined with
  /// a load/store to form a post-indexed memory access.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  // GlobalISel counterpart: is the given pre/post-indexed access legal?
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;
1327
  /// Replace the results of node N with new values in Results when the
  /// default legalization is insufficient; used during type legalization.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  // Opcode-specific helpers for ReplaceNodeResults.
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  // GlobalISel hook: whether MI should be localized (moved close to its
  // uses) — exact policy lives in the implementation.
  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  /// Target-specific demanded-bits simplification for AArch64 nodes.
  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;
1351
  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally useable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;

  // Returns the runtime value for PSTATE.SM by generating a call to
  // __arm_sme_state. The call is chained after Chain and produces a value of
  // type VT.
  SDValue getRuntimePStateSM(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                             EVT VT) const;
1368
  bool preferScalarizeSplat(SDNode *N) const override;

  /// Minimum number of cases before a jump table is emitted.
  unsigned getMinimumJumpTableEntries() const override;

  // Use "soft" promotion for half (fp16) types.
  bool softPromoteHalfType() const override { return true; }
1375 bool shouldScalarizeBinop(SDValue VecOp) const override {
1376 return VecOp.getOpcode() == ISD::SETCC;
1377 }
1378};
1379
namespace AArch64 {
/// Factory for the AArch64 fast-path instruction selector.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64
1384
1385} // end namespace llvm
1386
1387#endif
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
return RetTy
uint64_t Addr
uint32_t Index
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
static cl::opt< RegAllocEvictionAdvisorAnalysis::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development, "development", "for training")))
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override
Return true if the @llvm.experimental.vector.partial.reduce.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const override
Return the prefered common base offset.
bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
MachineBasicBlock * EmitInitTPIDR2Object(MachineInstr &MI, MachineBasicBlock *BB) const
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool shouldExpandCmpUsingSelects(EVT VT) const override
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool supportPtrAuthBundles() const override
Return true if the target supports ptrauth operand bundles.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool fallBackToDAGISel(const Instruction &Inst) const override
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
bool isLegalAddScalableImmediate(int64_t) const override
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
MachineBasicBlock * EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, bool Op0IsDef) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
void verifyTargetSDNode(const SDNode *N) const override
Check the given SDNode. Aborts if it is invalid.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
MachineBasicBlock * EmitAllocateSMESaveBuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
MachineBasicBlock * EmitAllocateZABuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool lowerInterleaveIntrinsicToStore(StoreInst *SI, ArrayRef< Value * > InterleaveValues) const override
Lower an interleave intrinsic to a target specific store intrinsic.
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool lowerDeinterleaveIntrinsicToLoad(LoadInst *LI, ArrayRef< Value * > DeinterleaveValues) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool isOpSuitableForLDPSTP(const Instruction *I) const
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
MachineBasicBlock * EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
MachineBasicBlock * EmitDynamicProbedAlloc(MachineInstr &MI, MachineBasicBlock *MBB) const
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, unsigned Condition, SDValue PStateSM=SDValue()) const
If a change in streaming mode is required on entry to/return from a function call it emits and return...
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition: APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:113
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:176
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Definition: MachineInstr.h:71
Flags
Flags values. These may be or'd together.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
An instruction for storing to memory.
Definition: Instructions.h:292
Saves strings in the provided stable storage and returns a StringRef with a stable character pointer.
Definition: StringSaver.h:21
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:80
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
@ NVCAST
Natural vector cast.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
const uint64_t ReservedFPControlBits
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1494
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1559
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1610
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1590
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CombineLevel
Definition: DAGCombine.h:15
DWARFExpression::Operation Op
@ Enable
Enable colors.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168