22#ifndef LLVM_CODEGEN_TARGETLOWERING_H
23#define LLVM_CODEGEN_TARGETLOWERING_H
75class FunctionLoweringInfo;
78class GISelValueTracking;
83class MachineBasicBlock;
86class MachineJumpTableInfo;
88class MachineRegisterInfo;
92class ProfileSummaryInfo;
93class TargetLibraryInfo;
95class TargetRegisterClass;
96class TargetRegisterInfo;
97class TargetTransformInfo;
122 bool DstAlignCanChange;
137 Align SrcAlign,
bool IsVolatile,
138 bool MemcpyStrSrc =
false) {
141 Op.DstAlignCanChange = DstAlignCanChange;
142 Op.DstAlign = DstAlign;
143 Op.AllowOverlap = !IsVolatile;
145 Op.ZeroMemset =
false;
146 Op.MemcpyStrSrc = MemcpyStrSrc;
147 Op.SrcAlign = SrcAlign;
152 bool IsZeroMemset,
bool IsVolatile) {
155 Op.DstAlignCanChange = DstAlignCanChange;
156 Op.DstAlign = DstAlign;
157 Op.AllowOverlap = !IsVolatile;
159 Op.ZeroMemset = IsZeroMemset;
160 Op.MemcpyStrSrc =
false;
166 assert(!DstAlignCanChange);
174 return isMemcpy() && !DstAlignCanChange;
240 ZeroOrNegativeOneBooleanContent
345 case UndefinedBooleanContent:
347 return ISD::ANY_EXTEND;
348 case ZeroOrOneBooleanContent:
350 return ISD::ZERO_EXTEND;
351 case ZeroOrNegativeOneBooleanContent:
353 return ISD::SIGN_EXTEND;
365 return IsStrictFPEnabled;
381 return MVT::getIntegerVT(
DL.getPointerSizeInBits(AS));
388 return MVT::getIntegerVT(
DL.getPointerSizeInBits(AS));
394 return getPointerTy(
DL,
DL.getAllocaAddrSpace());
400 return getPointerTy(
DL,
DL.getProgramAddressSpace());
406 return getPointerTy(
DL);
431 return DL.getPointerSizeInBits(0);
438 return MVT::getIntegerVT(getVectorIdxWidth(
DL));
445 return LLT::scalar(getVectorIdxWidth(
DL));
458 return MachineMemOperand::MONone;
465 return MachineMemOperand::MONone;
477 getVPIntrinsicMemOperandFlags(
const VPIntrinsic &VPIntrin)
const;
497 bool IsScalable)
const {
547 return TypeScalarizeVector;
550 return TypeWidenVector;
552 return TypePromoteInteger;
578 unsigned DefinedValues)
const {
579 return DefinedValues < 3;
636 return BypassSlowDivWidths;
677 const Value *)
const {
685 return PredictableSelectIsExpensive;
698 virtual bool isLoadBitCastBeneficial(
EVT LoadVT,
EVT BitcastVT,
708 return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
716 unsigned AddrSpace)
const {
750 return isOperationLegal(ISD::CTPOP, VT);
808 MVT VT = MVT::getIntegerVT(NumBits);
809 return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
834 return hasAndNotCompare(
X);
870 unsigned KeptBits)
const {
884 unsigned OldShiftOpcode,
unsigned NewShiftOpcode,
886 if (hasBitTest(
X,
Y)) {
892 if (OldShiftOpcode == ISD::SHL && CC->
isOne())
896 if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
922 return N->getOpcode() == ISD::FDIV;
937 EVT VT,
unsigned ShiftOpc,
bool MayTransformRotate,
938 const APInt &ShiftOrRotateAmt,
939 const std::optional<APInt> &AndMask)
const {
979 unsigned &
Cost)
const {
988 Type *VectorTy,
unsigned ElemSizeInBits,
unsigned &
Index)
const {
1033 return BooleanVectorContents;
1034 return isFloat ? BooleanFloatContents : BooleanContents;
1038 return getBooleanContents(
Type.isVector(),
Type.isFloatingPoint());
1051 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
1057 return SchedPreferenceInfo;
1072 assert(RC &&
"This value type is not natively supported!");
1080 const Value *)
const {
1099 return RepRegClassCostForVT[VT.
SimpleTy];
1109 virtual ShiftLegalizationStrategy
1111 unsigned ExpansionFactor)
const {
1112 if (ExpansionFactor == 1)
1113 return ShiftLegalizationStrategy::ExpandToParts;
1114 return ShiftLegalizationStrategy::ExpandThroughStack;
1135 return ValueTypeActions[VT.
SimpleTy];
1139 ValueTypeActions[VT.
SimpleTy] = Action;
1144 return ValueTypeActions;
1161 LegalizeKind getTypeConversion(
LLVMContext &Context,
EVT VT)
const;
1168 return getTypeConversion(
Context, VT).first;
1171 return ValueTypeActions.getTypeAction(VT);
1181 return getTypeConversion(
Context, VT).second;
1191 switch (getTypeAction(
Context, VT)) {
1194 case TypeExpandInteger:
1195 VT = getTypeToTransformTo(
Context, VT);
1212 EVT &IntermediateVT,
1213 unsigned &NumIntermediates,
1214 MVT &RegisterVT)
const;
1221 unsigned &NumIntermediates,
MVT &RegisterVT)
const {
1222 return getVectorTypeBreakdown(
Context, VT, IntermediateVT, NumIntermediates,
1263 bool ForCodeSize =
false)
const {
1279 virtual bool canOpTrap(
unsigned Op,
EVT VT)
const;
1300 if (
Op >= std::size(OpActions[0]))
1311 unsigned Scale)
const {
1320 unsigned Scale)
const {
1321 auto Action = getOperationAction(
Op, VT);
1322 if (Action !=
Legal)
1332 case ISD::SMULFIXSAT:
1334 case ISD::UMULFIXSAT:
1336 case ISD::SDIVFIXSAT:
1338 case ISD::UDIVFIXSAT:
1339 Supported = isSupportedFixedPointOperation(
Op, VT, Scale);
1343 return Supported ? Action : Expand;
1352#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1353 case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
1354#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1355 case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
1356#include "llvm/IR/ConstrainedOps.def"
1359 return getOperationAction(EqOpc, VT);
1367 bool LegalOnly =
false)
const {
1369 return isOperationLegal(
Op, VT);
1371 return (VT == MVT::Other || isTypeLegal(VT)) &&
1372 (getOperationAction(
Op, VT) ==
Legal ||
1373 getOperationAction(
Op, VT) == Custom);
1381 bool LegalOnly =
false)
const {
1383 return isOperationLegal(
Op, VT);
1385 return (VT == MVT::Other || isTypeLegal(VT)) &&
1386 (getOperationAction(
Op, VT) ==
Legal ||
1387 getOperationAction(
Op, VT) == Promote);
1395 bool LegalOnly =
false)
const {
1397 return isOperationLegal(
Op, VT);
1399 return (VT == MVT::Other || isTypeLegal(VT)) &&
1400 (getOperationAction(
Op, VT) ==
Legal ||
1401 getOperationAction(
Op, VT) == Custom ||
1402 getOperationAction(
Op, VT) == Promote);
1408 return getOperationAction(
Op, VT) == Custom;
1416 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1417 isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
1436 virtual MVT getPreferredSwitchConditionType(
LLVMContext &Context,
1437 EVT ConditionVT)
const;
1462 return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1463 (NumDests == 3 && NumCmps >= 6);
1470 return (!isTypeLegal(VT) || getOperationAction(
Op, VT) == Expand);
1475 return (VT == MVT::Other || isTypeLegal(VT)) &&
1476 getOperationAction(
Op, VT) ==
Legal;
1487 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
1488 MemI < MVT::VALUETYPE_SIZE &&
"Table isn't big enough!");
1489 unsigned Shift = 4 * ExtType;
1490 return (
LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
1495 return getLoadExtAction(ExtType, ValVT, MemVT) ==
Legal;
1501 return getLoadExtAction(ExtType, ValVT, MemVT) ==
Legal ||
1502 getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
1511 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
1512 MemI < MVT::VALUETYPE_SIZE &&
"Table isn't big enough!");
1513 unsigned Shift = 4 * ExtType;
1515 (
LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
1517 "Unsupported atomic load extension action.");
1524 return getAtomicLoadExtAction(ExtType, ValVT, MemVT) ==
Legal;
1534 assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
1535 "Table isn't big enough!");
1536 return TruncStoreActions[ValI][MemI];
1542 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) ==
Legal;
1548 return isTypeLegal(ValVT) &&
1549 (getTruncStoreAction(ValVT, MemVT) ==
Legal ||
1550 getTruncStoreAction(ValVT, MemVT) == Custom);
1554 bool LegalOnly)
const {
1556 return isTruncStoreLegal(ValVT, MemVT);
1558 return isTruncStoreLegalOrCustom(ValVT, MemVT);
1565 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1572 getIndexedLoadAction(IdxMode, VT.
getSimpleVT()) == Custom);
1579 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1586 getIndexedStoreAction(IdxMode, VT.
getSimpleVT()) == Custom);
1593 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1600 getIndexedMaskedLoadAction(IdxMode, VT.
getSimpleVT()) == Custom);
1607 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1614 getIndexedMaskedStoreAction(IdxMode, VT.
getSimpleVT()) == Custom);
1634 if (Scale != ElemSize && Scale != 1)
1644 assert((
unsigned)CC < std::size(CondCodeActions) &&
1645 ((
unsigned)VT.
SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1646 "Table isn't big enough!");
1651 assert(Action != Promote &&
"Can't promote condition code!");
1658 return getCondCodeAction(CC, VT) ==
Legal;
1664 return getCondCodeAction(CC, VT) ==
Legal ||
1665 getCondCodeAction(CC, VT) == Custom;
1673 EVT InputVT)
const {
1674 assert(
Opc == ISD::PARTIAL_REDUCE_SMLA ||
Opc == ISD::PARTIAL_REDUCE_UMLA ||
1675 Opc == ISD::PARTIAL_REDUCE_SUMLA);
1678 auto It = PartialReduceMLAActions.find(Key);
1679 return It != PartialReduceMLAActions.end() ? It->second : Expand;
1685 EVT InputVT)
const {
1687 return Action ==
Legal || Action == Custom;
1693 assert(getOperationAction(
Op, VT) == Promote &&
1694 "This operation isn't promoted!");
1697 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1699 PromoteToType.find(std::make_pair(
Op, VT.
SimpleTy));
1700 if (PTTI != PromoteToType.end())
return PTTI->second;
1703 "Cannot autopromote this type, add it with AddPromotedToType.");
1711 "Didn't find type to promote to!");
1713 getOperationAction(
Op, NVT) == Promote);
1718 bool AllowUnknown =
false)
const {
1727 bool AllowUnknown =
false)
const {
1729 if (
auto *PTy = dyn_cast<PointerType>(Ty))
1730 return getPointerTy(
DL, PTy->getAddressSpace());
1732 if (
auto *VTy = dyn_cast<VectorType>(Ty)) {
1733 Type *EltTy = VTy->getElementType();
1735 if (
auto *PTy = dyn_cast<PointerType>(EltTy)) {
1739 return EVT::getVectorVT(Ty->
getContext(), EVT::getEVT(EltTy,
false),
1740 VTy->getElementCount());
1743 return EVT::getEVT(Ty, AllowUnknown);
1747 bool AllowUnknown =
false)
const {
1749 if (
auto *PTy = dyn_cast<PointerType>(Ty))
1750 return getPointerMemTy(
DL, PTy->getAddressSpace());
1752 if (
auto *VTy = dyn_cast<VectorType>(Ty)) {
1753 Type *EltTy = VTy->getElementType();
1754 if (
auto *PTy = dyn_cast<PointerType>(EltTy)) {
1758 return EVT::getVectorVT(Ty->
getContext(), EVT::getEVT(EltTy,
false),
1759 VTy->getElementCount());
1768 bool AllowUnknown =
false)
const {
1779 return RegisterTypeForVT[VT.
SimpleTy];
1789 unsigned NumIntermediates;
1790 (void)getVectorTypeBreakdown(
Context, VT, VT1,
1791 NumIntermediates, RegisterVT);
1795 return getRegisterType(
Context, getTypeToTransformTo(
Context, VT));
1813 std::optional<MVT> RegisterVT = std::nullopt)
const {
1816 std::size(NumRegistersForVT));
1822 unsigned NumIntermediates;
1823 return getVectorTypeBreakdown(
Context, VT, VT1, NumIntermediates, VT2);
1827 unsigned RegWidth = getRegisterType(
Context, VT).getSizeInBits();
1828 return (
BitWidth + RegWidth - 1) / RegWidth;
1838 return getRegisterType(
Context, VT);
1847 return getNumRegisters(
Context, VT);
1854 return DL.getABITypeAlign(ArgTy);
1869 std::optional<unsigned> ByteOffset = std::nullopt)
const {
1893 return DL.isBigEndian() || VT == MVT::ppcf128;
1899 assert(
unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1900 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1904 return GatherAllAliasesMaxDepth;
1909 return getPointerTy(
DL).getSizeInBits();
1919 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1929 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1938 return MaxGluedStoresPerMemcpy;
1958 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1974 unsigned * =
nullptr)
const {
1982 unsigned * =
nullptr)
const {
1990 bool allowsMemoryAccessForAlignment(
1992 unsigned AddrSpace = 0,
Align Alignment =
Align(1),
1994 unsigned *Fast =
nullptr)
const;
2000 bool allowsMemoryAccessForAlignment(
LLVMContext &Context,
2003 unsigned *Fast =
nullptr)
const;
2011 unsigned AddrSpace = 0,
Align Alignment =
Align(1),
2013 unsigned *Fast =
nullptr)
const;
2021 unsigned *Fast =
nullptr)
const;
2026 unsigned *Fast =
nullptr)
const;
2055 virtual unsigned getMinimumJumpTableEntries()
const;
2058 unsigned getMinimumJumpTableDensity(
bool OptForSize)
const;
2062 unsigned getMaximumJumpTableSize()
const;
2064 virtual bool isJumpTableRelative()
const;
2069 return StackPointerRegisterToSaveRestore;
2092 return MinStackArgumentAlignment;
2121 virtual void insertSSPDeclarations(
Module &M)
const;
2126 virtual Value *getSDagStackGuard(
const Module &M)
const;
2161 virtual bool isFreeAddrSpaceCast(
unsigned SrcAS,
unsigned DestAS)
const;
2177 int InstructionOpcodeToISD(
unsigned Opcode)
const;
2194 return MaxAtomicSizeInBitsSupported;
2200 return MaxDivRemBitWidthSupported;
2206 return MaxLargeFPConvertBitWidthSupported;
2233 return AtomicOrdering::Monotonic;
2268 llvm_unreachable(
"Masked atomicrmw expansion unimplemented on this target");
2276 "Generic atomicrmw expansion unimplemented on this target");
2282 "Generic atomic store expansion unimplemented on this target");
2288 "Generic atomic load expansion unimplemented on this target");
2293 llvm_unreachable(
"Generic cmpxchg expansion unimplemented on this target");
2301 "Bit test atomicrmw expansion unimplemented on this target");
2309 "Compare arith atomicrmw expansion unimplemented on this target");
2383 return AtomicExpansionKind::None;
2390 return AtomicExpansionKind::CastToInteger;
2391 return AtomicExpansionKind::None;
2398 return AtomicExpansionKind::None;
2405 if (
SI->getValueOperand()->getType()->isFloatingPointTy())
2406 return AtomicExpansionKind::CastToInteger;
2407 return AtomicExpansionKind::None;
2412 virtual AtomicExpansionKind
2414 return AtomicExpansionKind::None;
2421 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2426 virtual AtomicExpansionKind
2431 return AtomicExpansionKind::CastToInteger;
2433 return AtomicExpansionKind::None;
2455 return ISD::ZERO_EXTEND;
2467 return ISD::ANY_EXTEND;
2481 if (hasMultipleConditionRegisters(VT))
2486 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2487 Action != TypeSplitVector;
2529 bool IsSigned)
const {
2553 BooleanContents = Ty;
2554 BooleanFloatContents = Ty;
2560 BooleanContents = IntTy;
2561 BooleanFloatContents = FloatTy;
2567 BooleanVectorContents = Ty;
2572 SchedPreferenceInfo = Pref;
2576 void setMinimumJumpTableEntries(
unsigned Val);
2580 void setMaximumJumpTableSize(
unsigned);
2585 StackPointerRegisterToSaveRestore = R;
2593 HasExtractBitsInsn = hasExtractInsn;
2599 void setJumpIsExpensive(
bool isExpensive =
true);
2603 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2616 virtual std::pair<const TargetRegisterClass *, uint8_t>
2627 assert(
Op < std::size(OpActions[0]) &&
"Table isn't big enough!");
2633 setOperationAction(
Op, VT, Action);
2638 setOperationAction(Ops, VT, Action);
2645 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.
isValid() &&
2646 MemVT.
isValid() &&
"Table isn't big enough!");
2647 assert((
unsigned)Action < 0x10 &&
"too many bits for bitfield array");
2648 unsigned Shift = 4 * ExtType;
2654 for (
auto ExtType : ExtTypes)
2655 setLoadExtAction(ExtType, ValVT, MemVT, Action);
2659 for (
auto MemVT : MemVTs)
2660 setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2667 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.
isValid() &&
2668 MemVT.
isValid() &&
"Table isn't big enough!");
2669 assert((
unsigned)Action < 0x10 &&
"too many bits for bitfield array");
2670 unsigned Shift = 4 * ExtType;
2678 for (
auto ExtType : ExtTypes)
2679 setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
2683 for (
auto MemVT : MemVTs)
2684 setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2701 for (
auto IdxMode : IdxModes)
2702 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2708 setIndexedLoadAction(IdxModes, VT, Action);
2718 for (
auto IdxMode : IdxModes)
2719 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2725 setIndexedStoreAction(IdxModes, VT, Action);
2735 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2745 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2752 for (
auto CC : CCs) {
2753 assert(VT.
isValid() && (
unsigned)CC < std::size(CondCodeActions) &&
2754 "Table isn't big enough!");
2755 assert((
unsigned)Action < 0x10 &&
"too many bits for bitfield array");
2767 setCondCodeAction(CCs, VT, Action);
2776 assert(
Opc == ISD::PARTIAL_REDUCE_SMLA ||
Opc == ISD::PARTIAL_REDUCE_UMLA ||
2777 Opc == ISD::PARTIAL_REDUCE_SUMLA);
2779 "setPartialReduceMLAAction types aren't valid");
2781 PartialReduceMLAActions[Key] = Action;
2785 for (
unsigned Opc : Opcodes)
2786 setPartialReduceMLAAction(
Opc, AccVT, InputVT, Action);
2800 setOperationAction(
Opc, OrigVT, Promote);
2801 AddPromotedToType(
Opc, OrigVT, DestVT);
2805 for (
auto Op : Ops) {
2806 setOperationAction(
Op, OrigVT, Promote);
2807 AddPromotedToType(
Op, OrigVT, DestVT);
2815 for (
auto NT : NTs) {
2816 assert(
unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2817 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2823 MinFunctionAlignment = Alignment;
2829 PrefFunctionAlignment = Alignment;
2837 MaxBytesForAlignment = MaxBytes;
2842 MinStackArgumentAlignment = Alignment;
2850 MaxAtomicSizeInBitsSupported = SizeInBits;
2856 MaxDivRemBitWidthSupported = SizeInBits;
2862 MaxLargeFPConvertBitWidthSupported = SizeInBits;
2867 MinCmpXchgSizeInBits = SizeInBits;
2872 SupportsUnalignedAtomics = UnalignedSupported;
2901 int64_t BaseOffs = 0;
2902 bool HasBaseReg =
false;
2904 int64_t ScalableOffset = 0;
2919 Type *Ty,
unsigned AddrSpace,
2930 int64_t MaxOffset)
const {
2975 return (
From->isIntegerTy() ||
From->isFloatingPointTy()) &&
2991 case ISD::SMUL_LOHI:
2992 case ISD::UMUL_LOHI:
3006 case ISD::FMINNUM_IEEE:
3007 case ISD::FMAXNUM_IEEE:
3010 case ISD::FMINIMUMNUM:
3011 case ISD::FMAXIMUMNUM:
3012 case ISD::AVGFLOORS:
3013 case ISD::AVGFLOORU:
3019 default:
return false;
3026 if (isCommutativeBinOp(Opcode))
3092 switch (
I->getOpcode()) {
3093 case Instruction::FPExt:
3094 if (isFPExtFree(EVT::getEVT(
I->getType()),
3095 EVT::getEVT(
I->getOperand(0)->getType())))
3098 case Instruction::ZExt:
3099 if (isZExtFree(
I->getOperand(0)->getType(),
I->getType()))
3102 case Instruction::SExt:
3107 return isExtFreeImpl(
I);
3123 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
3124 !isTruncateFree(Ext->getType(), Load->getType()))
3129 if (isa<ZExtInst>(Ext))
3130 LType = ISD::ZEXTLOAD;
3132 assert(isa<SExtInst>(Ext) &&
"Unexpected ext type!");
3133 LType = ISD::SEXTLOAD;
3136 return isLoadExtLegal(LType, VT, LoadVT);
3233 const APInt &GapMask)
const {
3251 const APInt &GapMask)
const {
3288 "invalid fpext types");
3296 LLT DestTy,
LLT SrcTy)
const {
3304 EVT DestVT,
EVT SrcVT)
const {
3306 "invalid fpext types");
3307 return isFPExtFree(DestVT, SrcVT);
3367 assert((
MI.getOpcode() == TargetOpcode::G_FADD ||
3368 MI.getOpcode() == TargetOpcode::G_FSUB ||
3369 MI.getOpcode() == TargetOpcode::G_FMUL) &&
3370 "unexpected node in FMAD forming combine");
3373 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3375 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3377 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3389 assert((
N->getOpcode() == ISD::FADD ||
N->getOpcode() == ISD::FSUB ||
3390 N->getOpcode() == ISD::FMUL) &&
3391 "unexpected node in FMAD forming combine");
3392 return isOperationLegal(ISD::FMAD,
N->getValueType(0));
3413 unsigned SelectOpcode,
3434 unsigned Index)
const {
3457 bool MathUsed)
const {
3460 if (Opcode != ISD::UADDO)
3470 return MathUsed && (VT.
isSimple() || !isOperationExpand(Opcode, VT));
3493 const APInt &AndMask)
const {
3495 return !shouldAvoidTransformToShift(VT, ShCt);
3505 return isOperationLegalOrCustom(
Op, VT);
3538 if (isOperationLegal(
Op, ToVT))
3541 case ISD::FP_TO_UINT:
3542 if (isOperationLegalOrCustom(ISD::FP_TO_SINT, ToVT))
3543 return ISD::FP_TO_SINT;
3545 case ISD::STRICT_FP_TO_UINT:
3546 if (isOperationLegalOrCustom(ISD::STRICT_FP_TO_SINT, ToVT))
3547 return ISD::STRICT_FP_TO_SINT;
3549 case ISD::VP_FP_TO_UINT:
3550 if (isOperationLegalOrCustom(ISD::VP_FP_TO_SINT, ToVT))
3551 return ISD::VP_FP_TO_SINT;
3570 Libcalls.setLibcallImpl(Call, Impl);
3575 return Libcalls.getLibcallImpl(Call);
3581 return Libcalls.getLibcallName(Call).data();
3586 return RTLIB::RuntimeLibcallsInfo::getLibcallImplName(Call);
3591 return Libcalls.getMemcpyName().data();
3597 return Libcalls.getSupportedLibcallImpl(FuncName);
3603 ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call)
const;
3607 Libcalls.setLibcallImplCallingConv(Call, CC);
3613 return Libcalls.getLibcallImplCallingConv(Call);
3619 return Libcalls.getLibcallCallingConv(Call);
3646 bool HasExtractBitsInsn;
3652 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
3657 bool JumpIsExpensive;
3661 BooleanContent BooleanContents;
3665 BooleanContent BooleanFloatContents;
3669 BooleanContent BooleanVectorContents;
3676 Align MinStackArgumentAlignment;
3680 Align MinFunctionAlignment;
3684 Align PrefFunctionAlignment;
3687 Align PrefLoopAlignment;
3689 unsigned MaxBytesForAlignment;
3693 unsigned MaxAtomicSizeInBitsSupported;
3697 unsigned MaxDivRemBitWidthSupported;
3702 unsigned MaxLargeFPConvertBitWidthSupported;
3706 unsigned MinCmpXchgSizeInBits;
3709 bool SupportsUnalignedAtomics;
3713 Register StackPointerRegisterToSaveRestore;
3718 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3719 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3732 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3739 MVT TransformToType[MVT::VALUETYPE_SIZE];
3746 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3752 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3756 uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3760 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3768 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3776 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
3778 using PartialReduceActionTypes =
3779 std::tuple<unsigned, MVT::SimpleValueType, MVT::SimpleValueType>;
3783 DenseMap<PartialReduceActionTypes, LegalizeAction> PartialReduceMLAActions;
3785 ValueTypeActionImpl ValueTypeActions;
3792 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3800 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3804 RTLIB::RuntimeLibcallsInfo Libcalls;
3808 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3812 enum IndexedModeActionsBits {
3815 IMAB_MaskedStore = 8,
3816 IMAB_MaskedLoad = 12
3819 void setIndexedModeAction(
unsigned IdxMode, MVT VT,
unsigned Shift,
3820 LegalizeAction Action) {
3821 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3822 (
unsigned)Action < 0xf &&
"Table isn't big enough!");
3823 unsigned Ty = (
unsigned)VT.SimpleTy;
3824 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3825 IndexedModeActions[Ty][IdxMode] |= ((
uint16_t)Action) << Shift;
3829 unsigned Shift)
const {
3830 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3831 "Table isn't big enough!");
3832 unsigned Ty = (
unsigned)VT.SimpleTy;
3833 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3881 unsigned MaxGluedStoresPerMemcpy = 0;
3948 bool isPositionIndependent()
const;
3975 return MRI.hasOneNonDBGUse(N0);
4013 virtual unsigned getJumpTableEncoding()
const;
4016 return getPointerTy(
DL);
4049 unsigned OpNo)
const {
4065 bool IsSignaling =
false)
const;
4083 std::pair<SDValue, SDValue> makeLibCall(
SelectionDAG &DAG, RTLIB::Libcall LC,
4085 MakeLibCallOptions CallOptions,
4093 const uint32_t *CallerPreservedMask,
4113 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
4131 findOptimalMemOpLowering(
LLVMContext &Context, std::vector<EVT> &MemOps,
4132 unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
4141 const APInt &DemandedElts,
4142 TargetLoweringOpt &TLO)
const;
4146 TargetLoweringOpt &TLO)
const;
4153 const APInt &DemandedElts,
4164 TargetLoweringOpt &TLO)
const;
4181 TargetLoweringOpt &TLO,
unsigned Depth = 0,
4182 bool AssumeSingleUse =
false)
const;
4187 KnownBits &Known, TargetLoweringOpt &TLO,
4189 bool AssumeSingleUse =
false)
const;
4194 DAGCombinerInfo &DCI)
const;
4199 const APInt &DemandedElts,
4200 DAGCombinerInfo &DCI)
const;
4206 const APInt &DemandedElts,
4208 unsigned Depth = 0)
const;
4214 unsigned Depth = 0)
const;
4219 const APInt &DemandedElts,
4221 unsigned Depth = 0)
const;
4237 bool SimplifyDemandedVectorElts(
SDValue Op,
const APInt &DemandedEltMask,
4239 TargetLoweringOpt &TLO,
unsigned Depth = 0,
4240 bool AssumeSingleUse =
false)
const;
4244 bool SimplifyDemandedVectorElts(
SDValue Op,
const APInt &DemandedElts,
4245 DAGCombinerInfo &DCI)
const;
4259 virtual void computeKnownBitsForTargetNode(
const SDValue Op,
4261 const APInt &DemandedElts,
4263 unsigned Depth = 0)
const;
4271 const APInt &DemandedElts,
4273 unsigned Depth = 0)
const;
4278 const APInt &DemandedElts,
4280 unsigned Depth = 0)
const;
4289 unsigned Depth = 0)
const;
4294 virtual void computeKnownBitsForFrameIndex(
int FIOp,
4302 virtual unsigned ComputeNumSignBitsForTargetNode(
SDValue Op,
4303 const APInt &DemandedElts,
4305 unsigned Depth = 0)
const;
4311 virtual unsigned computeNumSignBitsForTargetInstr(
4320 virtual bool SimplifyDemandedVectorEltsForTargetNode(
4322 APInt &KnownZero, TargetLoweringOpt &TLO,
unsigned Depth = 0)
const;
4329 virtual bool SimplifyDemandedBitsForTargetNode(
SDValue Op,
4331 const APInt &DemandedElts,
4333 TargetLoweringOpt &TLO,
4334 unsigned Depth = 0)
const;
4339 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
4346 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4354 canCreateUndefOrPoisonForTargetNode(
SDValue Op,
const APInt &DemandedElts,
4356 bool ConsiderFlags,
unsigned Depth)
const;
4373 virtual bool isKnownNeverNaNForTargetNode(
SDValue Op,
4374 const APInt &DemandedElts,
4377 unsigned Depth = 0)
const;
4381 virtual bool isSplatValueForTargetNode(
SDValue Op,
const APInt &DemandedElts,
4384 unsigned Depth = 0)
const;
4389 return Op.getOpcode() == ISD::SPLAT_VECTOR ||
4390 Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
4407 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
4441 bool foldBooleans, DAGCombinerInfo &DCI,
4442 const SDLoc &dl)
const;
4465 virtual SDValue PerformDAGCombine(
SDNode *
N, DAGCombinerInfo &DCI)
const;
4477 SDValue ShiftLHS =
N->getOperand(0);
4480 if (ShiftLHS.
getOpcode() == ISD::SIGN_EXTEND &&
4495 bool IsAfterLegal)
const {
4525 return AndOrSETCCFoldKind::None;
4542 return isTypeLegal(VT);
4599 bool LegalOps,
bool OptForSize,
4600 NegatibleCost &
Cost,
4601 unsigned Depth = 0)
const;
4606 unsigned Depth = 0)
const {
4609 getNegatedExpression(
Op, DAG, LegalOps, OptForSize,
Cost,
Depth);
4625 bool LegalOps,
bool OptForSize,
4626 unsigned Depth = 0)
const {
4627 return getCheaperOrNeutralNegatedExpression(
Op, DAG, LegalOps, OptForSize,
4628 NegatibleCost::Cheaper,
Depth);
4634 bool OptForSize,
unsigned Depth = 0)
const {
4636 return getNegatedExpression(
Op, DAG, LegalOps, OptForSize,
Cost,
Depth);
4648 unsigned NumParts,
MVT PartVT, std::optional<CallingConv::ID> CC)
const {
4672 const SDValue *Parts,
unsigned NumParts,
4674 std::optional<CallingConv::ID> CC)
const {
4723 bool IsTailCall =
false;
4726 bool IsPostTypeLegalization =
false;
4728 unsigned NumFixedArgs = -1;
4742 std::optional<PtrAuthInfo>
PAI;
4746 DoesNotReturn(
false), IsReturnValueUsed(
true), IsConvergent(
false),
4763 return setLibCallee(CC, ResultType, ResultType,
Target,
4764 std::move(ArgsList));
4770 OrigRetTy = OrigResultType;
4774 NumFixedArgs = ArgsList.size();
4775 Args = std::move(ArgsList);
4785 RetTy = OrigRetTy = ResultType;
4786 IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
4787 RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
4788 RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
4789 NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge);
4793 NumFixedArgs = ArgsList.size();
4794 Args = std::move(ArgsList);
4801 RetTy = OrigRetTy = ResultType;
4803 IsInReg = Call.hasRetAttr(Attribute::InReg);
4805 Call.doesNotReturn() ||
4806 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
4808 IsReturnValueUsed = !Call.use_empty();
4809 RetSExt = Call.hasRetAttr(Attribute::SExt);
4810 RetZExt = Call.hasRetAttr(Attribute::ZExt);
4811 NoMerge = Call.hasFnAttr(Attribute::NoMerge);
4815 CallConv = Call.getCallingConv();
4817 Args = std::move(ArgsList);
4830 DoesNotReturn =
Value;
4845 IsReturnValueUsed = !
Value;
4850 IsConvergent =
Value;
4865 IsPatchPoint =
Value;
4870 IsPreallocated =
Value;
4880 IsPostTypeLegalization =
Value;
4890 ConvergenceControlToken = Token;
4914 : IsSigned(
false), DoesNotReturn(
false), IsReturnValueUsed(
true),
4915 IsPostTypeLegalization(
false), IsSoften(
false) {}
4923 DoesNotReturn =
Value;
4928 IsReturnValueUsed = !
Value;
4933 IsPostTypeLegalization =
Value;
4938 OpsVTBeforeSoften = OpsVT;
4939 RetVTBeforeSoften = RetVT;
4947 OpsTypeOverrides = OpsTypes;
4956 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI)
const;
5028 EVT MinVT = getRegisterType(MVT::i32);
5029 return VT.
bitsLT(MinVT) ? MinVT : VT;
5048 return DL.isLittleEndian();
5088 virtual void LowerOperationWrapper(
SDNode *
N,
5115 virtual const char *getTargetNodeName(
unsigned Opcode)
const;
5155 CW_SpecificReg = CW_Okay,
5156 CW_Register = CW_Good,
5157 CW_Memory = CW_Better,
5158 CW_Constant = CW_Best,
5159 CW_Default = CW_Okay
5179 MVT ConstraintVT = MVT::Other;
5187 LLVM_ABI bool isMatchingInputConstraint()
const;
5191 LLVM_ABI unsigned getMatchedOperand()
const;
5243 virtual std::pair<unsigned, const TargetRegisterClass *>
5249 if (ConstraintCode ==
"m")
5250 return InlineAsm::ConstraintCode::m;
5251 if (ConstraintCode ==
"o")
5252 return InlineAsm::ConstraintCode::o;
5253 if (ConstraintCode ==
"X")
5254 return InlineAsm::ConstraintCode::X;
5255 if (ConstraintCode ==
"p")
5256 return InlineAsm::ConstraintCode::p;
5257 return InlineAsm::ConstraintCode::Unknown;
5263 virtual const char *LowerXConstraint(
EVT ConstraintVT)
const;
5268 std::vector<SDValue> &Ops,
5274 const AsmOperandInfo &OpInfo,
5279 virtual void CollectTargetIntrinsicOperands(
const CallInst &
I,
5288 bool IsAfterLegalTypes,
5291 bool IsAfterLegalTypes,
5341 int Enabled,
int &RefinementSteps,
5342 bool &UseOneConstNR,
bool Reciprocal)
const {
5364 int Enabled,
int &RefinementSteps)
const {
5539 SDValue Op,
unsigned NumBitsPerElt)
const;
5569 bool IsNegative =
false)
const;
5606 std::pair<SDValue, SDValue> scalarizeVectorLoad(
LoadSDNode *LD,
5616 std::pair<SDValue, SDValue> expandUnalignedLoad(
LoadSDNode *LD,
5632 bool IsCompressedMemory)
const;
5673 SDValue expandFixedPointDiv(
unsigned Opcode,
const SDLoc &dl,
5757 SDValue &Chain,
bool IsSignaling =
false)
const;
5829 const SDLoc &
DL, DAGCombinerInfo &DCI)
const;
5831 const SDLoc &
DL, DAGCombinerInfo &DCI)
const;
5833 const SDLoc &
DL, DAGCombinerInfo &DCI)
const;
5837 DAGCombinerInfo &DCI,
5841 SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
5843 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const;
5847 DAGCombinerInfo &DCI,
const SDLoc &
DL,
5855 DAGCombinerInfo &DCI,
const SDLoc &
DL,
5867 SmallVectorImpl<ISD::OutputArg> &Outs,
5868 const TargetLowering &TLI,
const DataLayout &
DL);
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
block Block Frequency Analysis
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static cl::opt< unsigned > CostThreshold("dfa-cost-threshold", cl::desc("Maximum cost accepted for the transformation"), cl::Hidden, cl::init(50))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< unsigned > MaxLoadsPerMemcmpOptSize("max-loads-per-memcmp-opt-size", cl::Hidden, cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"))
static cl::opt< unsigned > MaxLoadsPerMemcmp("max-loads-per-memcmp", cl::Hidden, cl::desc("Set maximum number of loads used in expanded memcmp"))
const HexagonInstrInfo * TII
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
Implement a low-level type suitable for MachineInstr level instruction selection.
Machine Check Debug Module
Register const TargetRegisterInfo * TRI
Promote Memory to Register
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
support::ulittle16_t & Lo
support::ulittle16_t & Hi
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
bool isFloatingPointOperation() const
BinOp getOperation() const
LLVM_ABI bool getValueAsBool() const
Return the attribute's value as a boolean.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
CCState - This class holds information needed while lowering arguments and return values.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
This class represents a range of values.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isScalar() const
Exactly one element.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Common base class shared among various IRBuilders.
A wrapper class for inspecting calls to intrinsic functions.
constexpr unsigned getScalarSizeInBits() const
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
Represents a single loop in the control flow graph.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
ElementCount getVectorElementCount() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
Instructions::iterator instr_iterator
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This is an abstract virtual class for memory operations.
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
A discriminated union of two or more pointer types, with the discriminator in the low bit of the poin...
Analysis providing profile information.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool use_empty() const
Return true if there are no uses of this node.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
const TargetLowering & getTargetLoweringInfo() const
const DataLayout & getDataLayout() const
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
MachineFunction & getMachineFunction() const
LLVMContext * getContext() const
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
ArgListEntry(Value *Val, SDValue Node=SDValue())
ArgListEntry(Value *Val, SDValue Node, Type *Ty)
ArgListEntry(SDValue Node, Type *Ty)
Type * Ty
Same as OrigTy, or partially legalized for soft float libcalls.
Type * OrigTy
Original unlegalized argument type.
LegalizeTypeAction getTypeAction(MVT VT) const
void setTypeAction(MVT VT, LegalizeTypeAction Action)
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
virtual bool enableAggressiveFMAFusion(LLT Ty) const
Return true if target always benefits from combining into FMA for a given value type.
virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform a bit test atomicrmw using a target-specific intrinsic.
void setOperationAction(ArrayRef< unsigned > Ops, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual bool requiresUniformRegister(MachineFunction &MF, const Value *) const
Allows target to decide about the register class of the specific value that is live outside the defin...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
virtual unsigned getVaListSizeInBits(const DataLayout &DL) const
Returns the size of the platform's va_list object.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const
virtual bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
virtual bool hasAndNot(SDValue X) const
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
ReciprocalEstimate
Reciprocal estimate status values used by the functions below.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool enableAggressiveFMAFusion(EVT VT) const
Return true if target always benefits from combining into FMA for a given value type.
virtual bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const
Does this target support complex deinterleaving with the given operation and type.
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool, EVT ValVT) const
Promote the given target boolean to a target boolean of the given type.
virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const
Returns true if be combined with to form an ISD::FMAD.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
virtual bool hasStandaloneRem(EVT VT) const
Return true if the target can handle a standalone remainder operation.
virtual bool isExtFreeImpl(const Instruction *I) const
Return true if the extension represented by I is free.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT, bool LegalOnly) const
virtual bool isSelectSupported(SelectSupportKind) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual bool isEqualityCmpFoldedWithSignedCmp() const
Return true if instruction generated for equality comparison is folded with instruction generated for...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const
Use bitwise logic to make pairs of compares more efficient.
void setAtomicLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize tihs SHIFT instruction, with ExpansionFactor being the recu...
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const
Return if the target supports combining a chain like:
virtual Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const
Create the IR node for the given complex deinterleaving operation.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT, unsigned Scale) const
Custom method defined by each target to indicate if an operation which may require a scale is support...
void setLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
virtual Sched::Preference getSchedulingPreference(SDNode *) const
Some scheduler, e.g.
void setLibcallImplCallingConv(RTLIB::LibcallImpl Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
virtual MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
bool isExtLoad(const LoadInst *Load, const Instruction *Ext, const DataLayout &DL) const
Return true if Load and Ext can form an ExtLoad.
LegalizeTypeAction getTypeAction(MVT VT) const
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual bool shouldInsertFencesForAtomic(const Instruction *I) const
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
virtual AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool allowsMisalignedMemoryAccesses(LLT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
LLT handling variant.
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const
Perform a cmpxchg expansion using a target-specific method.
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
bool rangeFitsInWord(const APInt &Low, const APInt &High, const DataLayout &DL) const
Check whether the range [Low,High] fits in a machine word.
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
virtual Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const
Perform a masked atomicrmw using a target-specific intrinsic.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool enableExtLdPromotion() const
Return true if the target wants to use the optimization that turns ext(promotableInst1(....
virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, LLT DestTy, LLT SrcTy) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and ind...
void setMaxBytesForAlignment(unsigned MaxBytes)
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual bool hasBitTest(SDValue X, SDValue Y) const
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
MVT getRegisterType(LLVMContext &Context, EVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool needsFixedCatchObjects() const
virtual Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum fp to/from int conversion the backend supports.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual bool useFPRegsForHalfType() const
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
bool hasExtractBitsInsn() const
Return true if the target has BitExtract instructions.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall implementation.
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
virtual bool softPromoteHalfType() const
virtual bool areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX, const MemSDNode &NodeY) const
Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const
virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
void setOperationAction(ArrayRef< unsigned > Ops, MVT VT, LegalizeAction Action)
virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst, SDValue IntPow2) const
SelectSupportKind
Enum that describes what type of support for selects the target has.
LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT, EVT SrcVT) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X,...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
virtual bool preferScalarizeSplat(SDNode *N) const
bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
virtual bool shouldExpandCmpUsingSelects(EVT VT) const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &) const
LLT returning variant.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const
Perform a atomicrmw expansion using a target-specific way.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const
Return true if it is profitable to convert a select of FP constants into a constant pool load whose a...
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
const char * getMemcpyName() const
virtual bool hasStackProbeSymbol(const MachineFunction &MF) const
Returns the name of the symbol used to emit stack probes or the empty string if not applicable.
bool isSlowDivBypassed() const
Returns true if target has indicated at least one type should be bypassed.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isPartialReduceMLALegalOrCustom(unsigned Opc, EVT AccVT, EVT InputVT) const
Return true if a PARTIAL_REDUCE_U/SMLA node with the specified types is legal or custom for this targ...
virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to fold a pair of shifts into a mask.
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool hasMultipleConditionRegisters(EVT VT) const
Does the target have multiple (allocatable) condition registers that can be used to store the results...
unsigned getMaxExpandSizeMemcmp(bool OptSize) const
Get maximum # of load operations permitted for memcmp.
bool isStrictFPEnabled() const
Return true if the target support strict float operation.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const
True if target has some particular form of dealing with pointer arithmetic semantics for pointers wit...
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const
Lower an interleaved store to target specific intrinsics.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
virtual bool shouldFoldSelectWithSingleBitTest(EVT VT, const APInt &AndMask) const
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy, Idx).
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Same as getLoadExtAction, but for atomic loads.
bool EnableExtLdPromotion
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
void setLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
virtual void emitExpandAtomicStore(StoreInst *SI) const
Perform an atomic store using a target-specific way.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool preferIncOfAddToSubOfNot(EVT VT) const
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
virtual bool ShouldShrinkFPConstant(EVT) const
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned getMaxDivRemBitWidthSupported() const
Returns the size in bits of the maximum div/rem the backend supports.
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
virtual bool shouldKeepZExtForFP16Conv() const
Does this target require the clearing of high-order bits in a register passed to the fp16 to fp conve...
virtual AtomicExpansionKind shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const
Returns how the given atomicrmw should be cast by the IR-level AtomicExpand pass.
void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and in...
virtual bool shouldConsiderGEPOffsetSplit() const
const ValueTypeActionImpl & getValueTypeActions() const
virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool isTruncateFree(SDValue Val, EVT VT2) const
Return true if truncating the specific node Val to type VT2 is free.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
virtual bool isFNegFree(EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void setPartialReduceMLAAction(unsigned Opc, MVT AccVT, MVT InputVT, LegalizeAction Action)
Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treate...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
virtual bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isExtFree(const Instruction *I) const
Return true if the extension represented by I is free.
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const
Perform a masked cmpxchg using a target-specific intrinsic.
virtual bool isZExtFree(EVT FromTy, EVT ToTy) const
virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const
Return true if pulling a binary operation into a select with an identity constant is profitable.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
virtual ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const
virtual uint8_t getRepRegClassCostFor(MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeAction getPartialReduceMLAAction(unsigned Opc, EVT AccVT, EVT InputVT) const
Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treated.
bool isPredictableSelectExpensive() const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right...
virtual bool mergeStoresAfterLegalization(EVT MemVT) const
Allow store merging for the specified type after legalization in addition to before legalization.
virtual bool shouldMergeStoreOfLoadsOverCall(EVT, EVT) const
Returns true if it's profitable to allow merging store of loads when there are functions calls betwee...
RTLIB::LibcallImpl getSupportedLibcallImpl(StringRef FuncName) const
Check if this is valid libcall for the current module, otherwise RTLIB::Unsupported.
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isProfitableToHoist(Instruction *I) const
unsigned getGatherAllAliasesMaxDepth() const
virtual LegalizeAction getCustomOperationAction(SDNode &Op) const
How to legalize this custom operation?
virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const
IR version.
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal or custom on this target.
TargetLoweringBase(const TargetLoweringBase &)=delete
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified atomic load with extension is legal on this target.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const
There are two ways to clear extreme bits (either low or high): Mask: x & (-1 << y) (the instcombine c...
virtual bool alignLoopsWithOptSize() const
Should loops be aligned even when the function is marked OptSize (but not MinSize).
unsigned getMaxAtomicSizeInBitsSupported() const
Returns the maximum atomic operation size (in bits) supported by the backend.
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
virtual ~TargetLoweringBase()
virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, const MachineFunction &MF) const
Returns if it's reasonable to merge stores to MemVT size.
void setPartialReduceMLAAction(ArrayRef< unsigned > Opcodes, MVT AccVT, MVT InputVT, LegalizeAction Action)
LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
virtual bool preferABDSToABSWithNSW(EVT VT) const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
virtual bool getAddrModeArguments(const IntrinsicInst *, SmallVectorImpl< Value * > &, Type *&) const
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the add...
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool hasInlineStackProbe(const MachineFunction &MF) const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const
Return true if the @llvm.experimental.vector.partial.reduce.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
const DenseMap< unsigned int, unsigned int > & getBypassSlowDivWidths() const
Returns map of slow types for division or remainder with corresponding fast types.
void setOperationPromotedToType(ArrayRef< unsigned > Ops, MVT OrigVT, MVT DestVT)
unsigned getMaxLargeFPConvertBitWidthSupported() const
Returns the size in bits of the maximum fp to/from int conversion the backend supports.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, LLT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual bool lowerInterleaveIntrinsicToStore(Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const
Lower an interleave intrinsic to a target specific store intrinsic.
virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
AndOrSETCCFoldKind
Enum of different potentially desirable ways to fold (and/or (setcc ...), (setcc ....
virtual bool shouldScalarizeBinop(SDValue VecOp) const
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
Align getPrefFunctionAlignment() const
Return the preferred function alignment.
RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Get the libcall impl routine name for the specified libcall.
virtual void emitExpandAtomicLoad(LoadInst *LI) const
Perform an atomic load using a target-specific way.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass.
static StringRef getLibcallImplName(RTLIB::LibcallImpl Call)
Get the libcall routine name for the specified libcall implementation.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool useSoftFloat() const
virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x,...
BooleanContent getBooleanContents(EVT Type) const
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed masked store is legal on this target.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the preferred common base offset.
virtual bool isVectorClearMaskLegal(ArrayRef< int >, EVT) const
Similar to isShuffleMaskLegal.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool hasTargetDAGCombine(ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified node...
void setLibcallImpl(RTLIB::Libcall Call, RTLIB::LibcallImpl Impl)
virtual bool fallBackToDAGISel(const Instruction &Inst) const
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
virtual bool shouldSplatInsEltVarIndex(EVT) const
Return true if inserting a scalar into a variable element of an undef vector is more efficiently hand...
LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
NegatibleCost
Enum that specifies when a float negation is beneficial.
bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation has solution on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(EVT VT, unsigned ShiftOpc, bool MayTransformRotate, const APInt &ShiftOrRotateAmt, const std::optional< APInt > &AndMask) const
virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform an atomicrmw whose result is only used by comparison, using a target-specific intrinsic.
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const
Returns true if MI can be combined with another instruction to form TargetOpcode::G_FMAD.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, ArrayRef< MVT > VTs, LegalizeAction Action)
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual bool isLegalAddScalableImmediate(int64_t) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
std::vector< ArgListEntry > ArgListTy
virtual bool shouldAlignPointerArgs(CallInst *, unsigned &, Align &) const
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual bool isVScaleKnownToBeAPowerOfTwo() const
Return true only if vscale must be a power of two.
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const
virtual MachineMemOperand::Flags getTargetMMOFlags(const MemSDNode &Node) const
This callback is used to inspect load/store SDNode.
virtual EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
virtual Type * shouldConvertSplatType(ShuffleVectorInst *SVI) const
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI...
virtual bool isZExtFree(SDValue Val, EVT VT2) const
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
void setAtomicLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
virtual LLVM_READONLY LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy.
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
LLT getVectorIdxLLT(const DataLayout &DL) const
Returns the type to be used for the index operand of: G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT,...
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
virtual bool isComplexDeinterleavingSupported() const
Does this target support complex deinterleaving.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual bool addressingModeSupportsTLS(const GlobalValue &) const
Returns true if the target's addressing mode can target thread local storage (TLS).
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool shouldConvertPhiType(Type *From, Type *To) const
Given a set in interconnected phis of type 'From' that are loaded/stored or bitcast to type 'To',...
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
virtual bool preferZeroCompareBranch() const
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const
Lower an interleaved load to target specific intrinsics.
virtual unsigned getVectorIdxWidth(const DataLayout &DL) const
Returns the type to be used for the index operand of vector operations.
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
virtual bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const
virtual LoadInst * lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can b...
virtual bool hasPairedLoad(EVT, Align &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
virtual bool convertSelectOfConstantsToMath(EVT VT) const
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
TargetLoweringBase & operator=(const TargetLoweringBase &)=delete
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
virtual bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask, IntrinsicInst *DI) const
Lower a deinterleave intrinsic to a target specific load intrinsic.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const
Hooks for building estimates in place of slower divisions and square roots.
virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI, bool IsAfterLegal) const
GlobalISel - return true if it is profitable to move this shift by a constant amount through its oper...
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual void ReplaceNodeResults(SDNode *, SmallVectorImpl< SDValue > &, SelectionDAG &) const
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression if the cost is not expensive.
virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
SDValue getCheaperOrNeutralNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, const NegatibleCost CostThreshold=NegatibleCost::Neutral, unsigned Depth=0) const
virtual Register getRegisterByName(const char *RegName, LLT Ty, const MachineFunction &MF) const
Return the register ID of the name passed in.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
virtual bool isTargetCanonicalSelect(SDNode *N) const
Return true if the given select/vselect should be considered canonical and not be transformed.
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual SDValue unwrapAddress(SDValue N) const
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual bool IsDesirableToPromoteOp(SDValue, EVT &) const
This method query the target whether it is beneficial for dag combiner to promote the specified node.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const
Insert explicit copies in entry and exit blocks.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
~TargetLowering() override
TargetLowering & operator=(const TargetLowering &)=delete
virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset, bool IsPre, MachineRegisterInfo &MRI) const
Returns true if the specified base+offset is a legal indexed addressing mode for this target.
virtual void initializeSplitCSR(MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool ExpandInlineAsm(CallInst *) const
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to.
virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const
Return a reciprocal estimate value for the input operand.
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
virtual bool getPostIndexedAddressParts(SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
virtual bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const
For most targets, an LLVM type must be broken down into multiple smaller types.
virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
virtual bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to move this shift by a constant amount through its operand,...
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *, const MachineBasicBlock *, unsigned, MCContext &) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const
virtual void HandleByVal(CCState *, unsigned &, Align) const
Target-specific cleanup for formal ByVal parameters.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual bool getPreIndexedAddressParts(SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's address can be legally represented as pre-indexed load / store address.
virtual FastISel * createFastISel(FunctionLoweringInfo &, const TargetLibraryInfo *) const
This method returns a target specific FastISel object, or null if the target does not support "fast" instruction selection.
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled explicitly via copies.
virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, Register N1) const
virtual bool mayBeEmittedAsTailCall(const CallInst *) const
Return true if the target may be able to emit the call instruction as a tail call.
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use either a memory constraint or an address constraint.
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, MCRegister &PhysReg, int &Cost) const
Allows the target to handle physreg-carried dependency in target-specific way.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers.
virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const
virtual bool isDesirableToTransformToIntegerOp(unsigned, EVT) const
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Target - Wrapper for Target specific information.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a true value.
ComplexDeinterleavingOperation
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
CodeGenOptLevel
Code generation optimization level.
AtomicOrdering
Atomic ordering for LLVM's memory model.
LLVM_ABI EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
ComplexDeinterleavingRotation
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Represent subnormal handling kind for floating point instruction inputs and outputs.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool isVector() const
Return true if this is a vector value type.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
bool isInteger() const
Return true if this is an integer or a vector integer type.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
bool isDstAligned(Align AlignCheck) const
bool allowOverlap() const
bool isFixedDstAlign() const
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
Align getDstAlign() const
bool isMemcpyStrSrc() const
bool isAligned(Align AlignCheck) const
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
bool isSrcAligned(Align AlignCheck) const
bool isMemcpyWithFixedDstAlign() const
bool isZeroMemset() const
Align getSrcAlign() const
These are IR-level optimization flags that may be propagated to SDNodes.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
std::optional< unsigned > fallbackAddressSpace
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
This contains information for each constraint that we are lowering.
AsmOperandInfo(InlineAsm::ConstraintInfo Info)
Copy constructor for copying from a ConstraintInfo.
std::string ConstraintCode
This contains the actual string for the code, like "m".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setCallee(Type *ResultType, FunctionType *FTy, SDValue Target, ArgListTy &&ArgsList, const CallBase &Call)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
CallLoweringInfo & setInRegister(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SDValue ConvergenceControlToken
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setVarArg(bool Value=true)
SmallVector< SDValue, 4 > InVals
std::optional< PtrAuthInfo > PAI
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, Type *OrigResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo(SelectionDAG &DAG)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
bool isBeforeLegalizeOps() const
bool isAfterLegalizeDAG() const
CombineLevel getDAGCombineLevel()
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
MakeLibCallOptions & setDiscardResult(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
ArrayRef< Type * > OpsTypeOverrides
MakeLibCallOptions & setIsSigned(bool Value=true)
MakeLibCallOptions & setNoReturn(bool Value=true)
MakeLibCallOptions & setOpsTypeOverrides(ArrayRef< Type * > OpsTypes)
Override the argument type for an operand.
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)