#include "llvm/IR/IntrinsicsNVPTX.h"

#define DEBUG_TYPE "nvptx-lower"

    cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
             " 1: do it 2: do it aggressively"),

        "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),

        "Use IEEE Compliant F32 div.rnd if available (default)"),
        "Use IEEE Compliant F32 div.rnd if available, no FTZ")),

    cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),

    "nvptx-approx-log2f32",
    cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),

    "nvptx-force-min-byval-param-align", cl::Hidden,
    cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
             " params of device functions."),
  if (Flags.hasApproximateFuncs())
  if (Flags.hasApproximateFuncs())
static std::optional<std::pair<unsigned int, MVT>>
    return {{4, MVT::i64}};
  if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
    return {{2, MVT::i64}};
  unsigned PackRegSize;
    if (!CanLowerTo256Bit)
      return std::pair(NumElts, EltVT);
    if (!CanLowerTo256Bit)
    if (!CanLowerTo256Bit)
      return std::pair(NumElts, EltVT);
  const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
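// This helper appears to choose the (element count, element type) shape used
// to lower a wide value: i128/f128 become two i64 registers, the four-i64
// shape is only returned when the type can legally be lowered to a 256-bit
// access (CanLowerTo256Bit), and PackRegSize divided by the element size
// gives the number of elements packed per register.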
  for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
    if (VT.getScalarType() == MVT::i8) {
      if (RegisterVT == MVT::i16)
        RegisterVT = MVT::i8;
      else if (RegisterVT == MVT::v2i16)
        RegisterVT = MVT::v2i8;
        assert(RegisterVT == MVT::v4i8 &&
               "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
  for (unsigned I : seq(NumRegs)) {
    if (V.getValueType() == VT) {
      assert(I == 0 && "Index must be 0 for scalar value");
  return GetElement(0);
      "Promotion is not suitable for scalars of size larger than 64-bits");
  if (ParamAlignment < AccessSize)
  if (Offsets[Idx] & (AccessSize - 1))
  EVT EltVT = ValueVTs[Idx];
  if (EltSize >= AccessSize)
  unsigned NumElts = AccessSize / EltSize;
  if (AccessSize != EltSize * NumElts)
  if (Idx + NumElts > ValueVTs.size())
  if (NumElts != 4 && NumElts != 2)
  for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
    if (ValueVTs[j] != EltVT)
    if (Offsets[j] - Offsets[j - 1] != EltSize)
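// Eligibility checks for merging several parameter values into one vector
// access: the parameter must be aligned to the access size, the first element
// must start on an access-size boundary, the access must split into 2 or 4
// equally sized elements, and all elements must share the same type and be
// laid out contiguously.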
                                     bool IsVAArg = false) {
  const auto GetNumElts = [&](unsigned I) -> unsigned {
    for (const unsigned AccessSize : {16, 8, 4, 2}) {
          I, AccessSize, ValueVTs, Offsets, ParamAlignment);
      assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
             "Unexpected vectorization size");
  for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
    const unsigned NumElts = GetNumElts(I);
    VectorInfo.push_back(NumElts);
  assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
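// GetNumElts greedily tries the largest access first (16, 8, 4, then 2) and
// records how many values each chunk covers; the std::accumulate assertion
// above checks that the recorded chunk sizes add up to the total number of
// values being vectorized.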
  bool IsOpSupported = STI.allowFP16Math();
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXIMUMNUM:
  case ISD::FMINIMUMNUM:
    IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
    IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
  bool IsOpSupported = STI.hasNativeBF16Support(Op);
      Op, VT, IsOpSupported ? Action : NoBF16Action);
  bool IsOpSupported = false;
    IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
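// Feature gating: an operation is only treated as natively supported when
// both the SM architecture and the PTX ISA version are new enough (for
// example sm_80 with PTX 7.0, or sm_90 with PTX 8.0); otherwise the fallback
// action supplied by the caller is used.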
  if (STI.hasF32x2Instructions()) {
  if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
  if (STI.hasF32x2Instructions())
  for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
                 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
                 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
                     {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
  if (STI.hasHWROT32()) {
  for (MVT ValVT : FloatVTs) {
    for (MVT MemVT : FloatVTs) {
  for (MVT ValVT : IntVTs)
    for (MVT MemVT : IntVTs)
    if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
                     {MVT::i16, MVT::i32, MVT::i64}, Legal);
                     {MVT::v2i16, MVT::v2i32}, Expand);
  if (STI.getPTXVersion() >= 43) {
                       ISD::FMAXIMUM, ISD::FMINIMUM, ISD::FMAXIMUMNUM,
  if (STI.allowFP16Math() || STI.hasBF16Math())
    if (EltVT == MVT::f32 || EltVT == MVT::f64) {
                          ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM},
  for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
    if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
  const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
                                        STI.getPTXVersion() >= 60 &&
  for (const auto &VT : {MVT::f16, MVT::v2f16})
  setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
  setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
  for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
                         ISD::FROUNDEVEN, ISD::FTRUNC}) {
  if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
  if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
    for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
  if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
    for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
  for (const auto &Op :
  if (STI.getPTXVersion() >= 65) {
    setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
    setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
    setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
    setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
  for (const auto &Op :
       {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM}) {
  bool SupportsF32MinMaxNaN =
      STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
  for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
  setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
  setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
  setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
  setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
                     {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
                      MVT::v32i32, MVT::v64i32, MVT::v128i32},
                     {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
                      MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
                     {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
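// setFP16OperationAction / setBF16OperationAction appear to mark an operation
// Legal when the subtarget handles it natively and otherwise fall back to the
// alternative action passed in: Promote (to f32) for the scalar f16/bf16
// types and Expand for the packed v2f16/v2bf16 types.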
#define MAKE_CASE(V)                                                           \
                                             bool Reciprocal) const {
  if (Reciprocal || ExtraSteps > 0) {
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
                                   : Intrinsic::nvvm_rsqrt_approx_f);
    else if (VT == MVT::f64)
      return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
                                   : Intrinsic::nvvm_sqrt_approx_f);
          DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
          MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
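// This square-root estimate hook selects rsqrt.approx (with the FTZ variant
// for f32 when flushing denormals) for reciprocal square roots and
// sqrt.approx for plain f32 square roots; the non-reciprocal f64 case is
// presumably composed as rcp.approx.ftz.f64 of rsqrt.approx.f64 above, since
// PTX has no sqrt.approx for f64.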
                                       std::optional<unsigned> FirstVAArg,
                                       const CallBase &CB,
                                       unsigned UniqueCallSite) const {
  std::string Prototype;
  O << "prototype_" << UniqueCallSite << " : .callprototype ";
    const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
    O << ".param .align " << RetAlign.value() << " .b8 _["
      << DL.getTypeAllocSize(RetTy) << "]";
      size = ITy->getBitWidth();
             "Floating point type expected here");
      O << ".param .b" << size << " _";
      O << ".param .b" << PtrVT.getSizeInBits() << " _";
  const unsigned NumArgs = FirstVAArg.value_or(Args.size());
  for (const unsigned I : llvm::seq(NumArgs)) {
    const auto ArgOuts =
        AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
    AllOuts = AllOuts.drop_front(ArgOuts.size());
    Type *Ty = Args[I].Ty;
    if (ArgOuts[0].Flags.isByVal()) {
      Type *ETy = Args[I].IndirectType;
      Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
      Align ParamByValAlign =
      O << ".param .align " << ParamByValAlign.value() << " .b8 _["
        << ArgOuts[0].Flags.getByValSize() << "]";
          getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
      O << ".param .align " << ParamAlign.value() << " .b8 _["
        << DL.getTypeAllocSize(Ty) << "]";
              (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");
      sz = PtrVT.getSizeInBits();
      sz = Ty->getPrimitiveSizeInBits();
    O << ".param .b" << sz << " _";
  O << (first ? "" : ",") << " .param .align "
    << STI.getMaxRequiredAlignment() << " .b8 _[]";
    return DL.getABITypeAlign(Ty);
  if (!DirectCallee) {
      return StackAlign.value();
  return DL.getABITypeAlign(Ty);
  if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
    Ptr = ASC->getOperand(0);
  const EVT ActualVT = V.getValueType();
  assert((ActualVT == ExpectedVT ||
         "Non-integer argument type size mismatch");
  if (ExpectedVT.bitsGT(ActualVT))
  if (ExpectedVT.bitsLT(ActualVT))
  if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
        "Support for variadic functions (unsized array parameter) introduced "
        "in PTX ISA version 6.0 and requires target sm_30.");
  const auto GetI32 = [&](const unsigned I) {
  const unsigned UniqueCallSite = GlobalUniqueCallSite++;
  const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
        {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
        {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1518 "Non-VarArg function with extra arguments");
1521 unsigned VAOffset = 0;
1523 const SDValue VADeclareParam =
1524 CLI.
Args.size() > FirstVAArg
1525 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1526 Align(STI.getMaxRequiredAlignment()), 0)
1540 assert(AllOuts.size() == AllOutVals.size() &&
1541 "Outs and OutVals must be the same size");
1545 const auto ArgI = E.index();
1546 const auto Arg = E.value();
1547 const auto ArgOuts =
1548 AllOuts.take_while([&](
auto O) {
return O.OrigArgIndex == ArgI; });
1549 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1550 AllOuts = AllOuts.drop_front(ArgOuts.size());
1551 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1553 const bool IsVAArg = (ArgI >= FirstVAArg);
1554 const bool IsByVal = Arg.IsByVal;
1557 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1559 assert((!IsByVal || Arg.IndirectType) &&
1560 "byval arg must have indirect type");
1561 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1563 const Align ArgAlign = [&]() {
1568 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1572 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1,
DL);
1575 const unsigned TySize =
DL.getTypeAllocSize(ETy);
1576 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1577 "type size mismatch");
    const SDValue ArgDeclare = [&]() {
        return VADeclareParam;
        return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
      assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
      assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
             "Only int and float types are supported as non-array arguments");
      return MakeDeclareScalarParam(ParamSymbol, TySize);
      assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
      SDValue SrcPtr = ArgOutVals[0];
      const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
      const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
        VAOffset = alignTo(VAOffset, ArgAlign);
      for (const unsigned NumElts : VI) {
            DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
        TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
            DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
    assert(VTs.size() == Offsets.size() && "Size mismatch");
    assert(VTs.size() == ArgOuts.size() && "Size mismatch");
    const bool ExtendIntegerParam =
        Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
    const auto GetStoredValue = [&](const unsigned I) {
             "OutVal type should always be legal");
          ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
    for (const unsigned NumElts : VI) {
             "Vectorization should be disabled for vaargs.");
      const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
        assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
      const MaybeAlign CurrentAlign = ExtendIntegerParam
            return GetStoredValue(J + K);
  const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
      const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
      MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
      MakeDeclareScalarParam(RetSymbol, ResultSize);
  if (VADeclareParam) {
        VADeclareParam.getOperand(2), GetI32(VAOffset),
                    VADeclareParam->getVTList(), DeclareParamOps);
  const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
    assert(CalleeFunc != nullptr && "Libcall callee must be set.");
    CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
  if (IsIndirectCall) {
        HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
    const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
    CallPrereqs.push_back(PrototypeDeclare);
  const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
  const unsigned NumArgs =
      {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
       GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
  assert(VTs.size() == Ins.size() && "Bad value decomposition");
  const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
  const bool ExtendIntegerRetVal =
      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
  for (const unsigned NumElts : VI) {
        ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
        ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
    for (const unsigned J : llvm::seq(NumElts))
                             UniqueCallSite + 1, SDValue(), dl);
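// Call lowering in outline: declare a ".param" symbol for the return value
// and for each argument (byval and aggregate arguments as aligned byte
// arrays, everything else as scalars), store the argument values into those
// params (extending sub-32-bit integers to i32), emit the call itself with
// flags for convergence, indirectness and the prototype id, then load the
// result back from the return param and close the call sequence.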
  if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
        "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
        "requires target sm_52.",
  if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
        "Support for stackrestore requires PTX ISA version >= 7.3 and target "
    return Op.getOperand(0);
  if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
        "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    EVT VVT = SubOp.getNode()->getValueType(0);
    for (unsigned j = 0; j < NumSubElem; ++j) {
  assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
         Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
    ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
  while (Level.size() > 1) {
    unsigned I = 0, E = Level.size();
    for (; I + NumInputs <= E; I += NumInputs) {
    if (ReducedLevel.empty()) {
      assert(OpIdx < Ops.size() && "no smaller operators for reduction");
    Level = ReducedLevel;
  return *Level.begin();
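// Tree-reduction helper: each pass combines groups of NumInputs values from
// the current level into a new, smaller level using the corresponding opcode
// from Ops; when a level no longer fills a complete group it falls back to
// the next (smaller-arity) operator, and the loop ends once a single value
// remains.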
  switch (ReductionOpcode) {
  case ISD::VECREDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
    return ISD::FMINNUM;
  case ISD::VECREDUCE_FMAXIMUM:
    return ISD::FMAXIMUM;
  case ISD::VECREDUCE_FMINIMUM:
    return ISD::FMINIMUM;
static std::optional<NVPTXISD::NodeType>
  switch (ReductionOpcode) {
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAXIMUM:
  case ISD::VECREDUCE_FMINIMUM:
  return std::nullopt;
  const SDNodeFlags Flags = Op->getFlags();
  const unsigned Opcode = Op->getOpcode();
  const EVT EltTy = Vector.getValueType().getVectorElementType();
  const bool CanUseMinMax3 =
      EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
      STI.getPTXVersion() >= 88 &&
      (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
       Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
  SmallVector<std::pair<unsigned, unsigned>, 2> ScalarOps;
      CanUseMinMax3 && Opcode3Elem)
    ScalarOps.push_back({*Opcode3Elem, 3});
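// On sm_100 and newer with PTX 8.8, f32 min/max reductions may additionally
// use the three-input min3/max3 operations, so a 3-input scalar opcode is
// registered here ahead of the ordinary two-input one for the reduction tree
// to prefer.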
  EVT FromVT = Op->getOperand(0)->getValueType(0);
  if (FromVT != MVT::v2i8) {
  EVT ToVT = Op->getValueType(0);
  EVT VT = Op->getValueType(0);
    return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
           isa<ConstantFPSDNode>(Operand);
  if (VT != MVT::v4i8)
                      uint64_t SelectionValue) -> SDValue {
    return getPRMT(L, R, SelectionValue, DL, DAG);
  auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
  auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
  auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
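// BUILD_VECTOR of v4i8 appears to be lowered with PRMT byte permutes: the two
// selector-0x3340 PRMTs each place a pair of source bytes in the low half of
// a 32-bit value, and the final selector-0x5410 PRMT combines the low halves
// of those intermediates into the packed four-byte result.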
  auto GetOperand = [](SDValue Op, int N) -> APInt {
    EVT VT = Op->getValueType(0);
      return APInt(32, 0);
    if (VT == MVT::v2f16 || VT == MVT::v2bf16)
    else if (VT == MVT::v2i16 || VT == MVT::v4i8)
    if (VT == MVT::v4i8)
    return Value.zext(32);
  assert(32 % NumElements == 0 && "must evenly divide bit length");
  const unsigned ShiftAmount = 32 / NumElements;
  for (unsigned ElementNo : seq(NumElements))
    Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
  return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
  EVT VectorVT = Vector.getValueType();
  if (VectorVT == MVT::v4i8) {
    Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
    Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
    Ext->setFlags(Flags);
  SDLoc dl(Op.getNode());
  EVT VectorVT = Vector.getValueType();
  if (VectorVT != MVT::v4i8)
  if (Value->isUndef())
  return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
  if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
  uint32_t Selector = 0;
    if (I.value() != -1)
      Selector |= (I.value() << (I.index() * 4));
  EVT VT = Op.getValueType();
  if (VTBits == 32 && STI.getSmVersion() >= 35) {
  EVT VT = Op.getValueType();
  if (VTBits == 32 && STI.getSmVersion() >= 35) {
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
    return LowerFROUND32(Op, DAG);
    return LowerFROUND64(Op, DAG);
  EVT VT = Op.getValueType();
  const unsigned SignBitMask = 0x80000000;
  const unsigned PointFiveInBits = 0x3F000000;
  SDValue PointFiveWithSignRaw =
      DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
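// LowerFROUND32 appears to implement round-half-away-from-zero without a
// dedicated instruction: 0x3F000000 is the bit pattern of 0.5f, the sign bit
// of the input (mask 0x80000000) is merged into it, and the result is added
// to the input before truncating, i.e. roughly trunc(x + copysign(0.5f, x)),
// with separate handling for inputs too large to have a fractional part.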
  EVT VT = Op.getValueType();
      DAG.getNode(ISD::FTRUNC, SL, VT, A);
  EVT VT = N->getValueType(0);
  assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
  if (Op.getValueType() == MVT::bf16) {
        DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
  assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
  if (Op.getOperand(0).getValueType() == MVT::bf16) {
        Op.getOpcode(), Loc, Op.getValueType(),
        DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
  EVT NarrowVT = Op.getValueType();
  if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
    if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
    if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
  EVT WideVT = Op.getValueType();
      (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
      return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
      (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
  if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
  return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
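// bf16 is only handled natively on newer SM/PTX combinations; on older
// targets the int<->fp, FP_ROUND and FP_EXTEND paths above appear to legalize
// bf16 by going through f32 (extend to f32, perform the operation, then round
// back, or use BF16_TO_FP for the widening direction).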
  if (Op.getValueType() != MVT::v2i16)
  EVT EltVT = Op.getValueType().getVectorElementType();
  for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
                   [&](const SDUse &O) {
                     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                        O.get(), DAG.getIntPtrConstant(I, DL));
  for (size_t I = 0; I < N->getNumOperands(); I++) {
  return Tcgen05StNode;
2660 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2662 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2664 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2666 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2668 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2670 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2672 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2674 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2676 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2678 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2681 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2684 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2686 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2688 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2690 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2692 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2694 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2696 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2698 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2700 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2702 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2704 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2707 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2709 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2711 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2713 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
  for (size_t I = 0; I < N->getNumOperands(); I++) {
  return Tcgen05MMANode;
static std::optional<std::pair<SDValue, SDValue>>
  EVT ResVT = N->getValueType(0);
  for (unsigned i = 0; i < NumElts; ++i)
    Ops.push_back(N->getOperand(3));
    Ops.push_back(N->getOperand(4));
    Ops.push_back(N->getOperand(3));
  for (unsigned i = 0; i < NumElts; ++i) {
  return {{BuildVector, Chain}};
2801 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2802 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2803 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2804 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2805 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2806 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2807 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2808 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2809 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2810 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2811 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2812 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2813 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2814 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2815 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2816 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2817 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2818 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2819 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2820 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2821 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2822 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2823 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2824 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2825 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2826 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2827 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2828 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2829 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2830 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2831 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2832 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2833 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2834 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2835 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2836 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2837 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2839 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2840 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2841 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2842 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2843 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2844 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2845 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2846 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2847 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2848 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2849 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2850 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2851 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2852 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2853 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2854 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2855 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2856 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2858 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2860 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2861 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2862 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2864 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2866 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
  if (N->getOperand(1).getValueType() != MVT::i128) {
  auto Opcode = [&]() {
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
  SDValue TryCancelResponse = N->getOperand(1);
  SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
  return DAG.getNode(Opcode, DL, N->getVTList(),
                     {TryCancelResponse0, TryCancelResponse1});
  unsigned IntrinsicID = N->getConstantOperandVal(0);
  for (unsigned i = 0; i < 4; ++i)
  auto [OpCode, RetTy, CvtModeFlag] =
      [&]() -> std::tuple<NVPTXISD::NodeType, MVT::SimpleValueType, uint32_t> {
2930 switch (IntrinsicID) {
2931 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2933 CvtMode::RS | CvtMode::RELU_FLAG};
2934 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2936 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2938 CvtMode::RS | CvtMode::RELU_FLAG};
2939 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2941 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2943 CvtMode::RS | CvtMode::RELU_FLAG};
2944 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2946 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2948 CvtMode::RS | CvtMode::RELU_FLAG};
2949 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2951 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2953 CvtMode::RS | CvtMode::RELU_FLAG};
2954 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2961 Ops.push_back(RBits);
  const unsigned Mode = [&]() {
    switch (Op->getConstantOperandVal(0)) {
    case Intrinsic::nvvm_prmt:
    case Intrinsic::nvvm_prmt_b4e:
    case Intrinsic::nvvm_prmt_ecl:
    case Intrinsic::nvvm_prmt_ecr:
    case Intrinsic::nvvm_prmt_f4e:
    case Intrinsic::nvvm_prmt_rc16:
    case Intrinsic::nvvm_prmt_rc8:
  SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
  SDValue Selector = (Op->op_end() - 1)->get();
  switch (Op->getConstantOperandVal(1)) {
  case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
  case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
  case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
  case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
  switch (Op->getConstantOperandVal(0)) {
3021 case Intrinsic::nvvm_prmt:
3022 case Intrinsic::nvvm_prmt_b4e:
3023 case Intrinsic::nvvm_prmt_ecl:
3024 case Intrinsic::nvvm_prmt_ecr:
3025 case Intrinsic::nvvm_prmt_f4e:
3026 case Intrinsic::nvvm_prmt_rc16:
3027 case Intrinsic::nvvm_prmt_rc8:
3029 case Intrinsic::nvvm_internal_addrspace_wrap:
3030 return Op.getOperand(1);
3031 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
3032 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
3033 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
3034 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
3036 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
3037 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
3038 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
3039 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
3040 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
3041 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
3042 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
3043 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
3044 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
3045 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
  assert(V.getValueType() == MVT::i64 &&
         "Unexpected CTLZ/CTPOP type to legalize");
  assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
  const auto Amt = AmtConst->getZExtValue() & 63;
          ? std::make_tuple(AHi, ALo, BHi)
          : std::make_tuple(ALo, BHi, BLo);
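// For i64 funnel shifts with a constant amount, the amount is masked to six
// bits and the shift is decomposed onto the 32-bit halves of the operands;
// which triple of half-words feeds the result (the make_tuple selection
// above) depends on whether the masked amount reaches 32.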
  EVT Ty = Op.getValueType();
  if (Flags.hasNoInfs())
  assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
    TrueVal = TrueVal.getOperand(0);
    FalseVal = FalseVal.getOperand(0);
  EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
               ? TrueVal.getValueType()
               : FalseVal.getValueType();
  switch (Op.getOpcode()) {
  case ISD::ADDRSPACECAST:
    return LowerADDRSPACECAST(Op, DAG);
    return LowerBUILD_VECTOR(Op, DAG);
    return LowerBITCAST(Op, DAG);
    return LowerEXTRACT_VECTOR_ELT(Op, DAG);
    return LowerINSERT_VECTOR_ELT(Op, DAG);
    return LowerVECTOR_SHUFFLE(Op, DAG);
    return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAXIMUM:
  case ISD::VECREDUCE_FMINIMUM:
    return LowerVECREDUCE(Op, DAG);
    return LowerSTORE(Op, DAG);
    return LowerLOAD(Op, DAG);
    return LowerShiftLeftParts(Op, DAG);
    return LowerShiftRightParts(Op, DAG);
    return LowerFROUND(Op, DAG);
    return LowerFCOPYSIGN(Op, DAG);
    return LowerINT_TO_FP(Op, DAG);
    return LowerFP_TO_INT(Op, DAG);
    return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND:
    return LowerFP_EXTEND(Op, DAG);
    return LowerBR_JT(Op, DAG);
    return LowerVAARG(Op, DAG);
    return LowerVASTART(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
  case ISD::STACKRESTORE:
  case ISD::STACKSAVE:
    return LowerCopyToReg_128(Op, DAG);
    return PromoteBinOpIfF32FTZ(Op, DAG);
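// LowerOperation is the central dispatch for custom lowering: every ISD
// opcode marked Custom in the constructor is routed to the corresponding
// Lower* helper defined in this file.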
  unsigned JId = JT->getIndex();
  unsigned SrcAS = N->getSrcAddressSpace();
  unsigned DestAS = N->getDestAddressSpace();
  const MVT GenerictVT =
  SDValue SharedClusterConversion =
  return SharedClusterConversion;
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  const MaybeAlign MA(Node->getConstantOperandVal(3));
                     Tmp1, Tmp2, MachinePointerInfo(V));
                     MachinePointerInfo(V));
  return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
  SDValue VAReg = getParamSymbol(DAG, -1, PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
                      MachinePointerInfo(SV));
static std::optional<std::pair<SDValue, SDValue>>
  const EVT ResVT = LD->getValueType(0);
  const EVT MemVT = LD->getMemoryVT();
    return std::nullopt;
  const auto NumEltsAndEltVT =
  if (!NumEltsAndEltVT)
    return std::nullopt;
  const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
  Align Alignment = LD->getAlign();
  if (Alignment < PrefAlign) {
    return std::nullopt;
    return std::nullopt;
  ListVTs.push_back(MVT::Other);
                              LD->getMemOperand());
  for (const unsigned I : llvm::seq(NumElts)) {
  for (const unsigned I : llvm::seq(NumElts)) {
    if (LoadEltVT != EltVT)
  const MVT BuildVecVT =
  Results.append({Res->first, Res->second});
  assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
                              LD->getBasePtr(), LD->getPointerInfo(),
                              MVT::i8, LD->getAlign(),
                              LD->getMemOperand()->getFlags());
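// i1 has no direct PTX load, so i1 loads are performed as i8 memory accesses
// (note the MVT::i8 memory VT above) and the value is then narrowed back to
// i1; the surrounding helper likewise splits wide vector loads into the
// (NumElts, EltVT) shape chosen earlier when the alignment permits.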
  if (Op.getValueType() == MVT::i1)
  assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
         "Unexpected fpext-load");
                     LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
                     LD->getMemOperand());
  const EVT MemVT = N->getMemoryVT();
  const auto NumEltsAndEltVT =
  if (!NumEltsAndEltVT)
  const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
  Align Alignment = N->getAlign();
  if (Alignment < PrefAlign) {
  Ops.push_back(N->getOperand(0));
  for (const unsigned I : llvm::seq(NumElts)) {
                                 NumEltsPerSubVector);
  for (const unsigned I : llvm::seq(NumElts)) {
    Ops.push_back(ExtVal);
  Ops.append(N->op_begin() + 2, N->op_end());
                                 N->getMemoryVT(), N->getMemOperand());
  EVT VT = Store->getMemoryVT();
    return LowerSTOREi1(Op, DAG);
  SDNode *Node = Op.getNode();
      DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
                        ST->getAlign(), ST->getMemOperand()->getFlags());
  assert(Op.getOperand(1).getValueType() == MVT::i128 &&
         "Custom lowering for 128-bit CopyToReg only");
  SDNode *Node = Op.getNode();
  NewOps[0] = Op->getOperand(0);
  NewOps[1] = Op->getOperand(1);
  NewOps[4] = Op->getOperand(3);
unsigned NVPTXTargetLowering::getNumRegisters(
    std::optional<MVT> RegisterVT = std::nullopt) const {
  if (VT == MVT::i128 && RegisterVT == MVT::i128)
bool NVPTXTargetLowering::splitValueIntoRegisterParts(
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (Val.getValueType() == MVT::i128 && NumParts == 1) {
  StringRef SavedStr = nvTM->getStrPool().save(
  const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
  for (const auto &Arg : F.args()) {
    const auto ArgIns = AllIns.take_while(
        [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
    AllIns = AllIns.drop_front(ArgIns.size());
    Type *Ty = Arg.getType();
    if (Arg.use_empty()) {
      for (const auto &In : ArgIns) {
        assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
    SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
    if (Arg.hasByValAttr()) {
      assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
      const auto &ByvalIn = ArgIns[0];
             "Ins type did not match function type");
      assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
        P.getNode()->setIROrder(Arg.getArgNo() + 1);
        P.getNode()->setIROrder(Arg.getArgNo() + 1);
    assert(VTs.size() == ArgIns.size() && "Size mismatch");
    assert(VTs.size() == Offsets.size() && "Size mismatch");
        &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
    for (const unsigned NumElts : VI) {
      const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
          DAG.getLoad(VecVT, dl, Root, VecAddr,
      P.getNode()->setIROrder(Arg.getArgNo() + 1);
      for (const unsigned J : llvm::seq(NumElts)) {
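// Formal-argument lowering reads arguments back from their ".param" symbols:
// unused arguments are skipped, a byval argument is represented by a single
// pointer value, and every other argument is split into legal VTs and fetched
// with (possibly vectorized) loads whose IR order follows the argument index.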
  if (!OutChains.empty())
  Type *RetTy = F.getReturnType();
    assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
  const bool ExtendIntegerRetVal =
      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
  assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
  const auto GetRetVal = [&](unsigned I) -> SDValue {
           "OutVal type should always be legal");
        ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
  for (const unsigned NumElts : VI) {
    const MaybeAlign CurrentAlign = ExtendIntegerRetVal
        NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
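// Return lowering mirrors the argument path: the return value is split into
// legal VTs, sub-32-bit integers are widened to i32 (with i1 passed through
// i8), and the pieces are stored to the function's return ".param" using the
// same vector-split information computed for arguments.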
  if (Constraint.size() > 1)
  case Intrinsic::nvvm_match_all_sync_i32p:
  case Intrinsic::nvvm_match_all_sync_i64p:
    Info.memVT = MVT::i1;
3960 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
3961 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
3962 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
3963 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
3964 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
3965 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
3966 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
3967 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
3968 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
3969 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
3970 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
3971 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
3972 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
3973 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
3974 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
3975 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
3976 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
3977 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
3978 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
3979 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
3980 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
3981 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
3982 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
3983 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
    Info.memVT = MVT::v8f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
3992 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
3993 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
3994 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
3995 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
3996 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
3997 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
3998 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
3999 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4000 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4001 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4002 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4003 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4004 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4005 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4006 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4007 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4008 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4009 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4010 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4011 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4012 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4013 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4014 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4015 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(8);
4025 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4026 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4027 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4028 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4029 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4030 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4031 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4032 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4033 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4034 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4035 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4036 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4037 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4038 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4039 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4040 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4042 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4043 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4044 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4045 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4046 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4047 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4048 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4049 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4050 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4051 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4052 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4053 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4054 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4055 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4056 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4057 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4058 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4059 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4060 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4061 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4062 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4063 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4064 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
    Info.memVT = MVT::v4i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4074 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4075 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4076 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4077 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4078 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4079 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4080 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4081 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4083 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4084 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4085 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4086 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4087 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4088 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4089 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4090 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4091 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4092 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4093 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4094 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4095 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4096 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4097 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4098 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4099 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4100 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4101 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4102 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4103 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4104 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4105 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4106 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(4);
4116 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4117 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4118 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4119 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4120 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4121 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4122 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4123 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4124 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4125 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4126 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4127 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4137 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4138 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4139 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4140 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4141 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4142 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4143 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4144 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4145 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4146 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4147 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4148 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4149 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4150 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4151 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4152 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4162 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4163 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4164 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4165 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4167 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4168 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4169 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4170 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4172 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4173 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4174 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4175 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4176 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4177 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4178 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4179 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4180 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4181 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4182 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4183 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4193 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4194 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4195 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4196 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4197 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4198 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4199 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4200 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4201 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4202 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4203 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4204 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4205 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4206 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4207 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(8);
4217 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4218 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4219 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4220 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4222 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4223 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4224 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4225 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
    Info.memVT = MVT::f64;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(8);
4235 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4236 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4237 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4238 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
    Info.memVT = MVT::v2f64;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4248 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4249 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4250 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4251 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4252 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4253 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4254 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4255 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4256 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4257 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4258 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4259 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4269 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4270 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4271 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4272 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4273 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4274 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4275 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4276 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4277 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4278 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4279 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4280 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4281 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4282 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4283 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4284 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4294 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4295 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4296 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4297 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4298 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4299 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4300 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4301 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4302 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4303 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4304 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4305 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4315 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4316 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4317 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4318 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4319 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4320 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4321 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4322 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4323 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4324 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4325 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(8);
4335 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4336 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4337 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4338 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
    Info.memVT = MVT::v2f64;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4348 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4349 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4350 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(4);
4360 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4361 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4362 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
    Info.memVT = MVT::v4i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.align = Align(16);
4372 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4373 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4374 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4375 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4376 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4377 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4378 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4379 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4380 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4381 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4382 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4383 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4384 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4385 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4386 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4387 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4388 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4389 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4390 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4391 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4392 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4393 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
    auto &DL = I.getDataLayout();
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_prefetch_tensormap: {
    auto &DL = I.getDataLayout();
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    Info.ptrVal = I.getArgOperand(0);
4428 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4429 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4430 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4431 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4432 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4433 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4434 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4435 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4436 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4437 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4438 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4439 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4440 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4441 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4442 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4443 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4444 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4445 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4446 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4447 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4448 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4449 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4450 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4451 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4452 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4453 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4454 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4455 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4456 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4457 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4458 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4459 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4460 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4461 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4462 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4463 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4464 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4465 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4466 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4467 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4468 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4469 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4470 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4471 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4472 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4473 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4474 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4475 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4476 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4477 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4478 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4479 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4480 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4481 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4482 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4483 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4484 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4485 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4487 Info.memVT = MVT::v4f32;
4488 Info.ptrVal = nullptr;
4491 Info.align = Align(16);
4494 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4495 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4496 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4497 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4498 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4499 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4500 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4501 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4502 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4503 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4504 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4505 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4506 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4507 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4508 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4509 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4510 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4511 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4512 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4513 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4514 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4515 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4516 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4517 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4518 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4519 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4520 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4521 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4522 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4523 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4524 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4525 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4526 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4527 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4528 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4529 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4530 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4531 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4532 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4533 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4534 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4535 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4536 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4537 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4538 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4539 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4540 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4541 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4542 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4543 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4544 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4545 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4546 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4547 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4548 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4549 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4550 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4551 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4552 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4553 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4554 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4555 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4556 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4557 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4558 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4559 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4560 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4561 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4562 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4563 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4564 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4565 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4566 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4567 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4568 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4569 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4570 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4571 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4572 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4573 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4574 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4575 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4576 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4577 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4578 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4579 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4580 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4581 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4582 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4583 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4584 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4585 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4586 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4587 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4588 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4589 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4590 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4591 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4592 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4593 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4594 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4595 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4596 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4597 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4598 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4599 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4600 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4601 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4602 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4603 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4604 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4605 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4606 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4607 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4608 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4609 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4611 Info.memVT = MVT::v4i32;
4612 Info.ptrVal = nullptr;
4615 Info.align = Align(16);
4618 case Intrinsic::nvvm_suld_1d_i8_clamp:
4619 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4620 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4621 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4622 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4623 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4624 case Intrinsic::nvvm_suld_2d_i8_clamp:
4625 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4626 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4627 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4628 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4629 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4630 case Intrinsic::nvvm_suld_3d_i8_clamp:
4631 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4632 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4633 case Intrinsic::nvvm_suld_1d_i8_trap:
4634 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4635 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4636 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4637 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4638 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4639 case Intrinsic::nvvm_suld_2d_i8_trap:
4640 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4641 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4642 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4643 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4644 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4645 case Intrinsic::nvvm_suld_3d_i8_trap:
4646 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4647 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4648 case Intrinsic::nvvm_suld_1d_i8_zero:
4649 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4650 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4651 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4652 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4653 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4654 case Intrinsic::nvvm_suld_2d_i8_zero:
4655 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4656 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4657 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4658 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4659 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4660 case Intrinsic::nvvm_suld_3d_i8_zero:
4661 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4662 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4664 Info.memVT = MVT::i8;
4665 Info.ptrVal = nullptr;
4668 Info.align = Align(16);
4671 case Intrinsic::nvvm_suld_1d_i16_clamp:
4672 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4673 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4674 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4675 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4676 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
4677 case Intrinsic::nvvm_suld_2d_i16_clamp:
4678 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
4679 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
4680 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
4681 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
4682 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
4683 case Intrinsic::nvvm_suld_3d_i16_clamp:
4684 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
4685 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
4686 case Intrinsic::nvvm_suld_1d_i16_trap:
4687 case Intrinsic::nvvm_suld_1d_v2i16_trap:
4688 case Intrinsic::nvvm_suld_1d_v4i16_trap:
4689 case Intrinsic::nvvm_suld_1d_array_i16_trap:
4690 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
4691 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
4692 case Intrinsic::nvvm_suld_2d_i16_trap:
4693 case Intrinsic::nvvm_suld_2d_v2i16_trap:
4694 case Intrinsic::nvvm_suld_2d_v4i16_trap:
4695 case Intrinsic::nvvm_suld_2d_array_i16_trap:
4696 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
4697 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
4698 case Intrinsic::nvvm_suld_3d_i16_trap:
4699 case Intrinsic::nvvm_suld_3d_v2i16_trap:
4700 case Intrinsic::nvvm_suld_3d_v4i16_trap:
4701 case Intrinsic::nvvm_suld_1d_i16_zero:
4702 case Intrinsic::nvvm_suld_1d_v2i16_zero:
4703 case Intrinsic::nvvm_suld_1d_v4i16_zero:
4704 case Intrinsic::nvvm_suld_1d_array_i16_zero:
4705 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
4706 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
4707 case Intrinsic::nvvm_suld_2d_i16_zero:
4708 case Intrinsic::nvvm_suld_2d_v2i16_zero:
4709 case Intrinsic::nvvm_suld_2d_v4i16_zero:
4710 case Intrinsic::nvvm_suld_2d_array_i16_zero:
4711 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
4712 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
4713 case Intrinsic::nvvm_suld_3d_i16_zero:
4714 case Intrinsic::nvvm_suld_3d_v2i16_zero:
4715 case Intrinsic::nvvm_suld_3d_v4i16_zero:
4717 Info.memVT = MVT::i16;
4718 Info.ptrVal = nullptr;
4721 Info.align = Align(16);
4724 case Intrinsic::nvvm_suld_1d_i32_clamp:
4725 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
4726 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
4727 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
4728 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
4729 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
4730 case Intrinsic::nvvm_suld_2d_i32_clamp:
4731 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
4732 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
4733 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
4734 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
4735 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
4736 case Intrinsic::nvvm_suld_3d_i32_clamp:
4737 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
4738 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
4739 case Intrinsic::nvvm_suld_1d_i32_trap:
4740 case Intrinsic::nvvm_suld_1d_v2i32_trap:
4741 case Intrinsic::nvvm_suld_1d_v4i32_trap:
4742 case Intrinsic::nvvm_suld_1d_array_i32_trap:
4743 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
4744 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
4745 case Intrinsic::nvvm_suld_2d_i32_trap:
4746 case Intrinsic::nvvm_suld_2d_v2i32_trap:
4747 case Intrinsic::nvvm_suld_2d_v4i32_trap:
4748 case Intrinsic::nvvm_suld_2d_array_i32_trap:
4749 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
4750 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
4751 case Intrinsic::nvvm_suld_3d_i32_trap:
4752 case Intrinsic::nvvm_suld_3d_v2i32_trap:
4753 case Intrinsic::nvvm_suld_3d_v4i32_trap:
4754 case Intrinsic::nvvm_suld_1d_i32_zero:
4755 case Intrinsic::nvvm_suld_1d_v2i32_zero:
4756 case Intrinsic::nvvm_suld_1d_v4i32_zero:
4757 case Intrinsic::nvvm_suld_1d_array_i32_zero:
4758 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
4759 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
4760 case Intrinsic::nvvm_suld_2d_i32_zero:
4761 case Intrinsic::nvvm_suld_2d_v2i32_zero:
4762 case Intrinsic::nvvm_suld_2d_v4i32_zero:
4763 case Intrinsic::nvvm_suld_2d_array_i32_zero:
4764 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
4765 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
4766 case Intrinsic::nvvm_suld_3d_i32_zero:
4767 case Intrinsic::nvvm_suld_3d_v2i32_zero:
4768 case Intrinsic::nvvm_suld_3d_v4i32_zero:
4770 Info.memVT = MVT::i32;
4771 Info.ptrVal = nullptr;
4774 Info.align = Align(16);
4777 case Intrinsic::nvvm_suld_1d_i64_clamp:
4778 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
4779 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
4780 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
4781 case Intrinsic::nvvm_suld_2d_i64_clamp:
4782 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
4783 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
4784 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
4785 case Intrinsic::nvvm_suld_3d_i64_clamp:
4786 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
4787 case Intrinsic::nvvm_suld_1d_i64_trap:
4788 case Intrinsic::nvvm_suld_1d_v2i64_trap:
4789 case Intrinsic::nvvm_suld_1d_array_i64_trap:
4790 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
4791 case Intrinsic::nvvm_suld_2d_i64_trap:
4792 case Intrinsic::nvvm_suld_2d_v2i64_trap:
4793 case Intrinsic::nvvm_suld_2d_array_i64_trap:
4794 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
4795 case Intrinsic::nvvm_suld_3d_i64_trap:
4796 case Intrinsic::nvvm_suld_3d_v2i64_trap:
4797 case Intrinsic::nvvm_suld_1d_i64_zero:
4798 case Intrinsic::nvvm_suld_1d_v2i64_zero:
4799 case Intrinsic::nvvm_suld_1d_array_i64_zero:
4800 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
4801 case Intrinsic::nvvm_suld_2d_i64_zero:
4802 case Intrinsic::nvvm_suld_2d_v2i64_zero:
4803 case Intrinsic::nvvm_suld_2d_array_i64_zero:
4804 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
4805 case Intrinsic::nvvm_suld_3d_i64_zero:
4806 case Intrinsic::nvvm_suld_3d_v2i64_zero:
4808 Info.memVT = MVT::i64;
4809 Info.ptrVal = nullptr;
4812 Info.align = Align(16);
4815 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
4816 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
4817 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
4819 Info.memVT = MVT::v1i32;
4820 Info.ptrVal = I.getArgOperand(0);
4827 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
4828 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
4829 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
4830 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {
4832 Info.memVT = MVT::v2i32;
4833 Info.ptrVal = I.getArgOperand(0);
4840 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
4841 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
4842 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
4843 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
4844 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {
4846 Info.memVT = MVT::v4i32;
4847 Info.ptrVal = I.getArgOperand(0);
4854 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
4855 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
4856 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
4857 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
4858 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {
4860 Info.memVT = MVT::v8i32;
4861 Info.ptrVal = I.getArgOperand(0);
4868 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
4869 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
4870 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
4871 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
4872 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {
4874 Info.memVT = MVT::v16i32;
4875 Info.ptrVal = I.getArgOperand(0);
4882 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
4883 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
4884 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
4885 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
4886 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {
4888 Info.memVT = MVT::v32i32;
4889 Info.ptrVal = I.getArgOperand(0);
4896 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
4897 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
4898 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
4899 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
4900 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {
4902 Info.memVT = MVT::v64i32;
4903 Info.ptrVal = I.getArgOperand(0);
4910 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
4911 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
4912 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
4913 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
4914 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {
4916 Info.memVT = MVT::v128i32;
4917 Info.ptrVal = I.getArgOperand(0);
4924 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
4925 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
4926 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
4928 Info.memVT = MVT::i32;
4929 Info.ptrVal = I.getArgOperand(0);
4936 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
4937 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
4938 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
4939 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
4941 Info.memVT = MVT::v2i32;
4942 Info.ptrVal = I.getArgOperand(0);
4949 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
4950 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
4951 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
4952 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
4953 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
4955 Info.memVT = MVT::v4i32;
4956 Info.ptrVal = I.getArgOperand(0);
4963 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
4964 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
4965 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
4966 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
4967 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
4969 Info.memVT = MVT::v8i32;
4970 Info.ptrVal = I.getArgOperand(0);
4977 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
4978 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
4979 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
4980 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
4981 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
4983 Info.memVT = MVT::v16i32;
4984 Info.ptrVal = I.getArgOperand(0);
4991 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
4992 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
4993 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
4994 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
4995 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
4997 Info.memVT = MVT::v32i32;
4998 Info.ptrVal = I.getArgOperand(0);
5005 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5006 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5007 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5008 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5009 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5011 Info.memVT = MVT::v64i32;
5012 Info.ptrVal = I.getArgOperand(0);
5019 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5020 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5021 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5022 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5023 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5025 Info.memVT = MVT::v128i32;
5026 Info.ptrVal = I.getArgOperand(0);
5032 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5033 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5034 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5035 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5036 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5037 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5038 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5040 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5041 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5042 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5043 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5045 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5048 Info.memVT = MVT::v4i32;
5049 Info.ptrVal = I.getArgOperand(0);
5052 Info.align = Align(16);
5056 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5057 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5058 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5059 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5060 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5061 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5062 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5063 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5064 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5066 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5067 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5069 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5072 Info.memVT = MVT::v8i32;
5073 Info.ptrVal = I.getArgOperand(0);
5076 Info.align = Align(16);
5094 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
5099 if (!F || !F->hasLocalLinkage() ||
5100 F->hasAddressTaken(nullptr,
5104 return ABITypeAlign;
5107 return std::max(Align(16), ABITypeAlign);
5114 Align ArgAlign = InitialAlign;
5129 ArgAlign = std::max(ArgAlign, Align(4));
5139 std::string ParamName;
5144 ParamStr << "_vararg";
5146 ParamStr << "_param_" << Idx;
5198 if (Constraint.size() == 1) {
5199 switch (Constraint[0]) {
5218std::pair<unsigned, const TargetRegisterClass *>
5222 if (Constraint.size() == 1) {
5223 switch (Constraint[0]) {
5225 return std::make_pair(0U, &NVPTX::B1RegClass);
5228 return std::make_pair(0U, &NVPTX::B16RegClass);
5231 return std::make_pair(0U, &NVPTX::B32RegClass);
5235 return std::make_pair(0U, &NVPTX::B64RegClass);
5237 if (STI.getSmVersion() < 70)
5239 "supported for sm_70 and higher!");
5240 return std::make_pair(0U, &NVPTX::B128RegClass);
5270 return Const && Const->getZExtValue() == 0;
5302 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5310 ((ZeroOpNum == 1) ? N1 : MAD),
5311 ((ZeroOpNum == 1) ? MAD : N1));
5326 (N->getFlags().hasAllowContract() &&
5339 int nonAddCount = 0;
5348 int orderNo = N->getIROrder();
5354 if (orderNo - orderNo2 < 500)
5360 bool opIsLive = false;
5369 int orderNo3 = User->getIROrder();
5370 if (orderNo3 > orderNo) {
5378 int orderNo3 = User->getIROrder();
5379 if (orderNo3 > orderNo) {
5414 EVT ElementVT = N->getValueType(0);
5425 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5427 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5428 if (N->getOpcode() != ISD::LOAD)
5445 return !U.getUser()->use_empty();
5459 unsigned OldNumOutputs;
5460 switch (LD->getOpcode()) {
5467 Operands.push_back(DCI.DAG.getIntPtrConstant(
5477 if (ElementVT != MVT::v2f32)
5488 const unsigned NewNumOutputs = OldNumOutputs * 2;
5491 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5494 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5495 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5496 LD->getMemOperand());
5502 for (unsigned I : seq(OldNumOutputs))
5503 Results.push_back(DCI.DAG.getBuildVector(
5504 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5509 return DCI.DAG.getMergeValues(Results, DL);
5524 unsigned Front, unsigned Back) {
5531 EVT ElementVT = N->getOperand(Front).getValueType();
5541 switch (N->getOpcode()) {
5554 if (ElementVT != MVT::v2f32)
5568 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5574 if (!BV.hasOneUse())
5581 if (Op.getOpcode() == ISD::BITCAST)
5582 Op = Op.getOperand(0);
5586 Op->getOperand(0).getValueType() == MVT::i32)
5593 Operands.append({BV.getOperand(0), BV.getOperand(1)});
5595 Operands.append(N->op_end() - Back, N->op_end());
5599 ST->getMemoryVT(), ST->getMemOperand());
5610 if (!ST->getValue().getValueType().isSimple())
5623 if (!N->getValueType(0).isSimple())
5643 if (VT.isVector() || VT != MVT::i32)
5663 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
5676 switch (MinMax2Opcode) {
5678 case ISD::FMAXIMUMNUM:
5681 case ISD::FMINIMUMNUM:
5696 unsigned PTXVersion, unsigned SmVersion) {
5699 EVT VT = N->getValueType(0);
5700 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
5705 unsigned MinMaxOp2 = N->getOpcode();
5735 EVT VT = N->getValueType(0);
5739 const SDValue &Num = N->getOperand(0);
5740 const SDValue &Den = N->getOperand(1);
5743 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
5762 if (!Op.hasOneUse())
5764 EVT ToVT = N->getValueType(0);
5765 EVT FromVT = Op.getValueType();
5766 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
5767 (ToVT == MVT::i64 && FromVT == MVT::i32)))
5774 unsigned ExtOpcode = N->getOpcode();
5775 unsigned Opcode = 0;
5784 const auto ShiftAmt = Op.getConstantOperandVal(1);
5807 EVT OrigVT = Op.getOperand(0).getValueType();
5813 EVT OrigVT = Op.getOperand(0).getValueType();
5840 IsSigned = (LHSSign == Signed);
5844 const APInt &Val = CI->getAPIntValue();
5846 return Val.isIntN(OptSize);
5855 return LHSSign == RHSSign;
5865 EVT MulType = N->getValueType(0);
5866 if (MulType != MVT::i32 && MulType != MVT::i64) {
5906 if (MulType == MVT::i32) {
5907 DemotedVT = MVT::i16;
5909 DemotedVT = MVT::i32;
5931 return Const && Const->getZExtValue() == 1;
5939 return Add->getOperand(1);
5942 return Add->getOperand(0);
5983 (ConstOpNo == 1) ? X : NewMul,
5984 (ConstOpNo == 1) ? NewMul : X);
5995 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6045 unsigned int SmVersion) {
6046 EVT CCType = N->getValueType(0);
6050 EVT AType = A.getValueType();
6051 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6054 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6065 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6093 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6098 if (!Index || Index->getZExtValue() == 0)
6113 if (EltVT != EltIVT)
6114 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6116 if (EltVT != N->getValueType(0))
6126 if (VectorVT != MVT::v4i8)
6137 for (int I = 0; I < 4; ++I) {
6156 auto VT = N->getValueType(0);
6163 auto Op0 = N->getOperand(0);
6164 auto Op1 = N->getOperand(1);
6171 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6177 for (auto &[Op, OpBytes] : OpData) {
6179 if (Op->getOpcode() == ISD::BITCAST)
6180 *Op = Op->getOperand(0);
6183 Op->getOperand(0).getValueType() == MVT::i32))
6188 if (!Op->hasOneUse())
6191 *Op = Op->getOperand(0);
6199 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6200 "PRMT selector values out of range");
6202 *Op = Op->getOperand(0);
6208 auto &DAG = DCI.DAG;
6212 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6221 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6224 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6225 return ASCN2->getOperand(0);
6243 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2, unsigned S3) {
6245 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6250 return GetSelector(V, V + 1, V + 2, V + 3);
6252 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6254 return GetSelector(V, V, V, V);
6256 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6258 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6260 unsigned V1 = (V & 1) << 1;
6261 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6269 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6270 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6274 APInt Result(32, 0);
6279 APInt Byte = BitField.extractBits(8, Idx * 8);
6281 Byte = Byte.ashr(8);
6282 Result.insertBits(Byte, I * 8);
6297 N->getConstantOperandAPInt(1),
6298 N->getConstantOperandAPInt(2),
6299 N->getConstantOperandVal(3)),
6300 SDLoc(N), N->getValueType(0));
6315 switch (R.getOpcode()) {
6320 case ISD::BITCAST: {
6347 for (auto &Op : R->ops()) {
6361 R.getValueType(), V, R.getOperand(1));
6377 if (Reg.getOpcode() != ISD::LOAD) {
6386 DAGCombinerInfo &DCI) const {
6388 switch (N->getOpcode()) {
6393 case ISD::ADDRSPACECAST:
6408 case ISD::FMAXIMUMNUM:
6409 case ISD::FMINIMUMNUM:
6411 STI.getSmVersion());
6444 EVT ToVT = Op->getValueType(0);
6445 if (ToVT != MVT::v2i8) {
6472 case Intrinsic::nvvm_ldu_global_i:
6473 case Intrinsic::nvvm_ldu_global_f:
6474 case Intrinsic::nvvm_ldu_global_p: {
6475 EVT ResVT = N->getValueType(0);
6487 bool NeedTrunc = false;
6493 unsigned Opcode = 0;
6501 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6505 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6518 OtherOps.append(N->op_begin() + 2, N->op_end());
6528 for (unsigned i = 0; i < NumElts; ++i) {
6546 "Custom handling of non-i8 ldu/ldg?");
6569 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
6570 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
6571 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
6572 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
6573 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
6574 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
6575 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
6576 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
6577 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
6578 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
6579 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
6580 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
6581 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
6582 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
6583 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
6584 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
6585 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
6586 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
6587 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
6588 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
6589 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
6590 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
6591 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
6592 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
6594 Results.push_back(Res->first);
6595 Results.push_back(Res->second);
6599 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
6600 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
6601 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
6602 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
6603 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
6604 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
6606 Results.push_back(Res->first);
6607 Results.push_back(Res->second);
6622 assert(Reg.getValueType() == MVT::i128 &&
6623 "Custom lowering for CopyFromReg with 128-bit reg only");
6625 N->getValueType(2)};
6656 assert(N->getValueType(0) == MVT::i128 &&
6657 "Custom lowering for atomic128 only supports i128");
6665 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
6666 "requires target sm_90.",
6677 for (const auto &Op : AN->ops().drop_front(2)) {
6685 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
6692 {Result.getValue(0), Result.getValue(1)}));
6693 Results.push_back(Result.getValue(2));
6696void NVPTXTargetLowering::ReplaceNodeResults(
6698 switch (N->getOpcode()) {
6716 case ISD::ATOMIC_CMP_SWAP:
6717 case ISD::ATOMIC_SWAP:
6729 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
6730 STI.getPTXVersion() >= 63)
6732 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
6733 STI.getPTXVersion() >= 78)
6735 if (Ty->isFloatTy())
6737 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
6743 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
6763 if (STI.hasAtomBitwise64())
6784 if (STI.hasAtomMinMax64())
6823 STI.getMinCmpXchgSizeInBits() ||
6830 bool BitwidthSupportedAndIsSeqCst =
6833 STI.getMinCmpXchgSizeInBits();
6870 CASWidth < STI.getMinCmpXchgSizeInBits()))
6893 case ISD::VP_FP_TO_UINT:
6895 return ISD::VP_FP_TO_SINT;
6916 unsigned Mode = Op.getConstantOperandVal(3);
6926 "PRMT must have i32 operands");
6935 KnownBits Byte = BitField.extractBits(8, Idx * 8);
6946 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
6951 auto DestVT = LD->getValueType(0);
6952 if (DestVT.isVector())
6965 switch (Op.getOpcode()) {
6992 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
6993 unsigned ByteStart = (Idx % 4) * 8;
6995 Src.setBit(ByteStart + 7);
6997 Src.setBits(ByteStart, ByteStart + 8);
7000 return {DemandedLHS, DemandedRHS};
7019 SDValue Op0 = PRMT.getOperand(0);
7020 SDValue Op1 = PRMT.getOperand(1);
7025 unsigned Mode = PRMT.getConstantOperandVal(3);
7030 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7031 const unsigned SelBits = (4 - LeadingBytes) * 4;
7032 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7034 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7047 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7048 (DemandedOp1 && DemandedOp1 != Op1)) {
7049 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7050 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7062 switch (Op.getOpcode()) {
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
NVPTX address space definition.
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static NVPTXISD::NodeType getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
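A trivial standalone check of the identity this combine relies on (illustration only; fmaxnum3 below is our stand-in for the target's 3-input max, not an LLVM or PTX API):

#include <algorithm>
#include <cassert>

static float fmaxnum3(float A, float B, float C) { return std::max(std::max(A, B), C); }

int main() {
  float A = 1.0f, B = 5.0f, C = 3.0f;
  // max(max(a, b), c) == max3(a, b, c), so the two-node chain can fold into one node.
  assert(std::max(std::max(A, B), C) == fmaxnum3(A, B, C));
  return 0;
}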
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
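For readers unfamiliar with prmt.b32, here is a minimal standalone model of the byte-select step that computePRMT emulates on APInt values (our own illustration, not LLVM code; the real routine also normalizes the non-default PRMT modes through getPRMTSelector first):

#include <cstdint>
#include <cstdio>

// Each selector nibble picks one of the eight bytes of {B:A}; if bit 3 of the
// nibble is set, the selected byte's sign bit is replicated across the result
// byte instead (this is what the ashr(8) in the listing above does).
static uint32_t prmt(uint32_t A, uint32_t B, uint32_t Selector) {
  uint64_t BitField = (uint64_t(B) << 32) | A;
  uint32_t Result = 0;
  for (int I = 0; I < 4; ++I) {
    uint32_t Sel = (Selector >> (I * 4)) & 0xF;
    uint32_t Byte = uint32_t(BitField >> ((Sel & 7) * 8)) & 0xFF;
    if (Sel & 8)
      Byte = (Byte & 0x80) ? 0xFF : 0x00;
    Result |= Byte << (I * 8);
  }
  return Result;
}

int main() {
  // Selector 0x3210 picks bytes 0..3 of A, i.e. returns A unchanged: prints 33221100.
  printf("%08x\n", (unsigned)prmt(0x33221100u, 0x77665544u, 0x3210u));
  return 0;
}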
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< NVPTXISD::NodeType > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
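A standalone sketch of the 64-bit funnel-shift-left identity that expandFSH64 implements over narrower operations in the DAG (illustration only; the helper and constants below are ours, not the actual lowering):

#include <cassert>
#include <cstdint>

// fshl(A, B, Amt) yields the top 64 bits of the concatenation {A:B} rotated
// left by Amt; Amt is reduced modulo the bit width.
static uint64_t fshl64(uint64_t A, uint64_t B, unsigned Amt) {
  Amt &= 63;
  return Amt ? (A << Amt) | (B >> (64 - Amt)) : A;
}

int main() {
  assert(fshl64(0x0123456789ABCDEFull, 0xFEDCBA9876543210ull, 8) ==
         0x23456789ABCDEFFEull);
  return 0;
}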
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
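A standalone check of the arithmetic fact this combine depends on (our own example, not compiler code): when both operands of a 32-bit multiply are already sign-extended 16-bit values, a 16 x 16 -> 32 widening multiply (what PTX mul.wide.s16 computes) gives the same result as the full 32-bit multiply.

#include <cassert>
#include <cstdint>

int main() {
  int32_t X = -1234, Y = 567;                               // both provably fit in 16 bits
  int32_t Wide = int32_t(int16_t(X)) * int32_t(int16_t(Y)); // widening 16x16->32 multiply
  assert(Wide == X * Y);                                    // equals the plain i32 multiply
  return 0;
}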
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
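A plain-C++ sketch of the tree shape this helper builds, shown on floats with max as the combining operation (assumption: reassociation is acceptable, as it is for the reductions this is used for; the real function emits SDNodes and can mix 2- and 3-input ops):

#include <algorithm>
#include <cassert>
#include <vector>

static float treeReduce(std::vector<float> Elts) {
  while (Elts.size() > 1) {
    std::vector<float> Next;
    for (size_t I = 0; I + 1 < Elts.size(); I += 2)
      Next.push_back(std::max(Elts[I], Elts[I + 1])); // combine pairs on each level
    if (Elts.size() % 2)
      Next.push_back(Elts.back());                    // odd element carried up a level
    Elts = std::move(Next);
  }
  return Elts.front();
}

int main() {
  assert(treeReduce({1.0f, 7.0f, 3.0f, 5.0f}) == 7.0f);
  return 0;
}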
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
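A rough standalone model of the promotion rule described here (our own helper; the real routine operates on EVTs and, per the assertion earlier in the listing, never sees scalars wider than 64 bits): odd-sized integer scalars are rounded up to the nearest width that PTX parameters and registers can carry.

#include <cassert>

static unsigned promoteScalarWidth(unsigned Bits) {
  for (unsigned W : {1u, 8u, 16u, 32u, 64u})
    if (Bits <= W)
      return W;  // first supported width that can hold the value
  return Bits;   // wider than 64 bits: handled by other lowering paths
}

int main() {
  assert(promoteScalarWidth(13) == 16);
  assert(promoteScalarWidth(33) == 64);
  return 0;
}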
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool slt(const APInt &RHS) const
Signed less than comparison.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Common base class shared among various IRBuilders.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
Instances of this class represent a uniqued identifier for a section in the current translation unit.
StringRef getName() const
getName - Get the symbol name.
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
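A hedged sketch of what an override of this hook often does for a load-like intrinsic; the intrinsic ID is a placeholder and the fields follow TargetLowering::IntrinsicInfo:
case Intrinsic::not_a_real_intrinsic:   // placeholder ID for illustration
  Info.opc = ISD::INTRINSIC_W_CHAIN;    // chained node that also produces a value
  Info.memVT = MVT::i32;                // value type read from memory
  Info.ptrVal = I.getArgOperand(0);     // IR pointer operand, for alias analysis
  Info.flags = MachineMemOperand::MOLoad;
  return true;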
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
~NVPTXTargetObjectFile() override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
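For example, assuming a SelectionDAG &DAG, an SDLoc DL, and SDValues LHS/RHS are in scope, an i1 equality test can be built directly from a condition code (a sketch, not code from this file):
SDValue IsEq = DAG.getSetCC(DL, MVT::i1, LHS, RHS, ISD::SETEQ);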
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
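A sketch of the usual calling pattern for a chained, load-like target node; Chain, Ptr, and the opcode Opc are assumed to be in scope, and the types and alignment shown are illustrative:
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);   // result value + chain
SDValue Ops[] = {Chain, Ptr};
SDValue Res = DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MVT::i32,
                                      MachinePointerInfo(), Align(4),
                                      MachineMemOperand::MOLoad);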
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
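For instance, assuming Chain, Val, and Ptr SDValues are already available, a simple 4-byte-aligned store might be emitted as:
SDValue St = DAG.getStore(Chain, DL, Val, Ptr, MachinePointerInfo(), Align(4));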
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
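For example, stepping an assumed base pointer BasePtr forward by 8 bytes (a sketch):
SDValue FieldPtr = DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(8));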
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
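A hedged sketch of how a TargetLowering constructor typically drives this and the related setters below; the opcode/type pairs are illustrative, not a transcript of NVPTX's actual choices:
setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);  // synthesize from other nodes
setOperationAction(ISD::FADD, MVT::v2f16, Legal);      // natively supported
setOperationAction(ISD::FP_TO_SINT, MVT::i1, Custom);  // routed to LowerOperation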
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a type is legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
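For example, a target with no native f16-to-f32 extending load might mark that combination for expansion (a sketch under that assumption):
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);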
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
Type * getType() const
All values are typed, get the type of this value.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ SSUBO
Same for subtraction.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ADDRESS_SPACE_SHARED_CLUSTER
@ TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED
@ TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ CALL
This node represents a PTX call instruction.
@ TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ UNPACK_VECTOR
This node is the inverse of NVPTX::BUILD_VECTOR.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y
@ TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ DeclareScalarParam
These nodes represent a parameter declaration.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
@ BUILD_VECTOR
This node is similar to ISD::BUILD_VECTOR except that the output may be implicitly bitcast to a scala...
@ TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2
bool isPackedVectorTy(EVT VT)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
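A small usage sketch, assuming two equally sized containers and a hypothetical per-element callback:
for (const auto &[VT, Off] : zip(ValueVTs, Offsets))
  ProcessPiece(VT, Off);   // ProcessPiece is illustrative only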
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
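For example, alignTo(13, Align(8)) yields 16, the next multiple of 8 at or above 13.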
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
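A sketch of the typical calling pattern, assuming a TargetLowering TLI, a DataLayout DL, and an IR Type *Ty are in scope:
SmallVector<EVT, 8> ValueVTs;
SmallVector<TypeSize, 8> Offsets;
ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, &Offsets);
// ValueVTs[i] now describes the i-th scalar piece, at byte offset Offsets[i].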
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
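For example, to visit indices 0 through NumRegs-1 (NumRegs is an assumed unsigned count):
for (unsigned I : seq(0u, NumRegs))
  UsePiece(I);   // UsePiece is illustrative only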
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
ElementCount getVectorElementCount() const
bool is32BitVector() const
Return true if this is a 32-bit vector type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
bool isAfterLegalizeDAG() const
bool isBeforeLegalize() const
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)