50 #include "llvm/IR/IntrinsicsNVPTX.h"
76 #define DEBUG_TYPE "nvptx-lower"
86 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
87 " 1: do it 2: do it aggressively"),
93 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
95 clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"),
96 clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"),
97 clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2",
98 "Use IEEE Compliant F32 div.rnd if available (default)"),
99 clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3",
100 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
101 cl::init(NVPTX::DivPrecisionLevel::IEEE754));
105 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
111 "nvptx-approx-log2f32",
112 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
116 "nvptx-force-min-byval-param-align",
cl::Hidden,
117 cl::desc(
"NVPTX Specific: force 4-byte minimal alignment for byval"
118 " params of device functions."),
129 if (Flags.hasApproximateFuncs())
142 if (Flags.hasApproximateFuncs())
198 static std::optional<std::pair<unsigned int, MVT>>
205 return {{4, MVT::i64}};
212 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
213 return {{2, MVT::i64}};
221 unsigned PackRegSize;
234 if (!CanLowerTo256Bit)
243 return std::pair(NumElts, EltVT);
250 if (!CanLowerTo256Bit)
268 if (!CanLowerTo256Bit)
274 return std::pair(NumElts, EltVT);
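// Each packed register of PackRegSize bits holds PackRegSize / element-size
// scalar elements; that count is NPerReg below.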
284 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
305 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
311 if (VT.getScalarType() == MVT::i8) {
312 if (RegisterVT == MVT::i16)
313 RegisterVT = MVT::i8;
314 else if (RegisterVT == MVT::v2i16)
315 RegisterVT = MVT::v2i8;
317 assert(RegisterVT == MVT::v4i8 &&
318 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
325 for (unsigned I : seq(NumRegs)) {
346 if (V.getValueType() == VT) {
347 assert(I == 0 && "Index must be 0 for scalar value");
364 return GetElement(0);
390 "Promotion is not suitable for scalars of size larger than 64-bits");
424 if (ParamAlignment < AccessSize)
427 if (Offsets[Idx] & (AccessSize - 1))
430 EVT EltVT = ValueVTs[Idx];
434 if (EltSize >= AccessSize)
437 unsigned NumElts = AccessSize / EltSize;
439 if (AccessSize != EltSize * NumElts)
443 if (Idx + NumElts > ValueVTs.size())
447 if (NumElts != 4 && NumElts != 2)
450 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
452 if (ValueVTs[j] != EltVT)
456 if (Offsets[j] - Offsets[j - 1] != EltSize)
475 bool IsVAArg = false) {
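// GetNumElts tries the widest access first (16, 8, 4, then 2 bytes) and
// returns how many elements starting at I can be merged into one access;
// the result is asserted to be 1, 2, or 4.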
484 const auto GetNumElts = [&](unsigned I) -> unsigned {
485 for (const unsigned AccessSize : {16, 8, 4, 2}) {
487 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
488 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
489 "Unexpected vectorization size");
497 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
498 const unsigned NumElts = GetNumElts(I);
502 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
510 : TargetLowering(TM), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
561 Op, VT, IsOpSupported ? Action : NoBF16Action);
566 bool IsOpSupported = false;
662 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
663 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
664 MVT::v4i8, MVT::i32, MVT::i64}) {
693 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
714 for (MVT ValVT : FloatVTs) {
715 for (MVT MemVT : FloatVTs) {
727 for (MVT ValVT : IntVTs)
728 for (MVT MemVT : IntVTs)
755 if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
792 {MVT::i16, MVT::i32, MVT::i64}, Legal);
858 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
885 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
893 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
896 for (const auto &VT : {MVT::f16, MVT::v2f16})
923 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
937 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
966 for (const auto &Op :
994 for (const auto &Op :
1006 bool SupportsF32MinMaxNaN =
1063 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1064 MVT::v32i32, MVT::v64i32, MVT::v128i32},
1069 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1070 MVT::v32i32, MVT::v64i32, MVT::v128i32},
1083 #define MAKE_CASE(V) \
1150 bool Reciprocal) const {
1171 if (Reciprocal || ExtraSteps > 0) {
1173 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1174 : Intrinsic::nvvm_rsqrt_approx_f);
1175 else if (VT == MVT::f64)
1176 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1181 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1182 : Intrinsic::nvvm_sqrt_approx_f);
1190 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1191 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1199 std::optional<unsigned> FirstVAArg, const CallBase &CB,
1200 unsigned UniqueCallSite) const {
1203 std::string Prototype;
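// Builds the ".callprototype" declaration that PTX requires for indirect
// calls: the return .param (if any) followed by each argument's .param
// declaration, mirroring what the callee itself would declare.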
1205 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1207 if (RetTy->isVoidTy()) {
1212 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1213 O << ".param .align " << RetAlign.value() << " .b8 _["
1214 << DL.getTypeAllocSize(RetTy) << "]";
1215 } else if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy()) {
1217 if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1218 size = ITy->getBitWidth();
1221 "Floating point type expected here");
1229 O << ".param .b" << size << " _";
1230 } else if (isa<PointerType>(RetTy)) {
1231 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1241 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1243 for (const unsigned I : llvm::seq(NumArgs)) {
1244 const auto ArgOuts =
1245 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1246 AllOuts = AllOuts.drop_front(ArgOuts.size());
1248 Type *Ty = Args[I].Ty;
1254 if (ArgOuts[0].Flags.isByVal()) {
1257 Type *ETy = Args[I].IndirectType;
1258 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1259 Align ParamByValAlign =
1262 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1263 << ArgOuts[0].Flags.getByValSize() << "]";
1268 O << ".param .align " << ParamAlign.value() << " .b8 _["
1269 << DL.getTypeAllocSize(Ty) << "]";
1274 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1275 "type mismatch between callee prototype and arguments");
1278 if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
1280 } else if (isa<PointerType>(Ty)) {
1281 sz = PtrVT.getSizeInBits();
1285 O << ".param .b" << sz << " _";
1290 O << (first ? "" : ",") << " .param .align "
1310 return DL.getABITypeAlign(Ty);
1315 if (!DirectCallee) {
1320 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1323 return StackAlign.value();
1334 return DL.getABITypeAlign(Ty);
1341 if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))
1360 const auto *ASC = cast<AddrSpaceCastSDNode>(Ptr);
1362 Ptr = ASC->getOperand(0);
1381 const EVT ActualVT = V.getValueType();
1382 assert((ActualVT == ExpectedVT ||
1384 "Non-integer argument type size mismatch");
1385 if (ExpectedVT.bitsGT(ActualVT))
1387 if (ExpectedVT.bitsLT(ActualVT))
1398 "Support for variadic functions (unsized array parameter) introduced "
1399 "in PTX ISA version 6.0 and requires target sm_30.");
1411 const auto GetI32 = [&](const unsigned I) {
1415 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1423 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1429 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1430 CallPrereqs.push_back(Declare);
1439 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1440 CallPrereqs.push_back(Declare);
1461 "Non-VarArg function with extra arguments");
1464 unsigned VAOffset = 0;
1466 const SDValue VADeclareParam =
1467 CLI.Args.size() > FirstVAArg
1468 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1483 assert(AllOuts.size() == AllOutVals.size() &&
1484 "Outs and OutVals must be the same size");
1488 const auto ArgI = E.index();
1489 const auto Arg = E.value();
1490 const auto ArgOuts =
1491 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1492 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1493 AllOuts = AllOuts.drop_front(ArgOuts.size());
1494 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1496 const bool IsVAArg = (ArgI >= FirstVAArg);
1497 const bool IsByVal = Arg.IsByVal;
1500 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1502 assert((!IsByVal || Arg.IndirectType) &&
1503 "byval arg must have indirect type");
1504 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1506 const Align ArgAlign = [&]() {
1511 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1515 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1518 const unsigned TySize = DL.getTypeAllocSize(ETy);
1519 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1520 "type size mismatch");
1522 const SDValue ArgDeclare = [&]() {
1524 return VADeclareParam;
1527 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1529 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1530 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1531 "Only int and float types are supported as non-array arguments");
1533 return MakeDeclareScalarParam(ParamSymbol, TySize);
1537 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1538 SDValue SrcPtr = ArgOutVals[0];
1539 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1540 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1543 VAOffset = alignTo(VAOffset, ArgAlign);
1551 for (const unsigned NumElts : VI) {
1556 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1558 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1563 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
1565 CallPrereqs.push_back(StoreParam);
1576 assert(VTs.size() == Offsets.size() && "Size mismatch");
1577 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1583 const bool ExtendIntegerParam =
1584 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1586 const auto GetStoredValue = [&](const unsigned I) {
1590 "OutVal type should always be legal");
1594 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1601 for (const unsigned NumElts : VI) {
1609 "Vectorization should be disabled for vaargs.");
1615 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1618 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1625 const MaybeAlign CurrentAlign = ExtendIntegerParam
1631 return GetStoredValue(J + K);
1637 CallPrereqs.push_back(StoreParam);
1647 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1649 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1650 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1652 MakeDeclareScalarParam(RetSymbol, ResultSize);
1658 if (VADeclareParam) {
1661 VADeclareParam.getOperand(2), GetI32(VAOffset),
1664 VADeclareParam->getVTList(), DeclareParamOps);
1667 const auto *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1675 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1677 if (isa<ExternalSymbolSDNode>(Callee)) {
1682 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1686 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1689 if (IsIndirectCall) {
1700 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1706 CallPrereqs.push_back(PrototypeDeclare);
1709 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1710 const unsigned NumArgs =
1717 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1718 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1726 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1728 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1734 const bool ExtendIntegerRetVal =
1735 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1739 for (const unsigned NumElts : VI) {
1741 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1746 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1755 LoadChains.push_back(R.getValue(1));
1756 for (const unsigned J : llvm::seq(NumElts))
1764 UniqueCallSite + 1, SDValue(), dl);
1790 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
1791 "requires target sm_52.",
1830 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1833 return Op.getOperand(0);
1852 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1876 unsigned NumOperands = Node->getNumOperands();
1877 for (unsigned i = 0; i < NumOperands; ++i) {
1878 SDValue SubOp = Node->getOperand(i);
1882 for (unsigned j = 0; j < NumSubElem; ++j) {
1893 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1894 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1912 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1918 while (Level.size() > 1) {
1920 const auto [Op, NumInputs] = Ops[OpIdx];
1924 unsigned I = 0, E = Level.size();
1925 for (; I + NumInputs <= E; I += NumInputs) {
1934 if (ReducedLevel.empty()) {
1950 Level = ReducedLevel;
1953 return *Level.begin();
1958 switch (ReductionOpcode) {
1973 static std::optional<NVPTXISD::NodeType>
1975 switch (ReductionOpcode) {
1985 return std::nullopt;
1998 const unsigned Opcode = Op->getOpcode();
1999 const EVT EltTy = Vector.getValueType().getVectorElementType();
2002 const bool CanUseMinMax3 =
2010 SmallVector<std::pair<unsigned, unsigned>, 2> ScalarOps;
2013 CanUseMinMax3 && Opcode3Elem)
2026 EVT FromVT = Op->getOperand(0)->getValueType(0);
2027 if (FromVT != MVT::v2i8) {
2043 EVT ToVT = Op->getValueType(0);
2053 EVT VT = Op->getValueType(0);
2059 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
2060 isa<ConstantFPSDNode>(Operand);
2062 if (VT != MVT::v4i8)
2074 return getPRMT(L, R, SelectionValue, DL, DAG);
2076 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2077 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2078 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2085 EVT VT = Op->getValueType(0);
2087 return APInt(32, 0);
2089 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2090 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
2091 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2097 if (VT == MVT::v4i8)
2099 return Value.zext(32);
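// Pack all constant elements into one 32-bit immediate, shifting each element
// into its own 32 / NumElements bit lane.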
2117 assert(32 % NumElements == 0 && "must evenly divide bit length");
2118 const unsigned ShiftAmount = 32 / NumElements;
2119 for (unsigned ElementNo : seq(NumElements))
2120 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2132 if (VectorVT == MVT::v4i8) {
2140 Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
2141 Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
2142 Ext->setFlags(Flags);
2147 if (isa<ConstantSDNode>(Index.getNode()))
2169 if (VectorVT != MVT::v4i8)
2173 if (Value->isUndef())
2192 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
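// Build the PRMT selector nibble by nibble: each shuffle index occupies four
// bits of the selector; undef lanes (-1) leave their nibble as zero.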
2200 if (I.value() != -1)
2201 Selector |= (I.value() << (I.index() * 4));
2219 EVT VT = Op.getValueType();
2280 EVT VT = Op.getValueType();
2334 EVT VT = Op.getValueType();
2348 EVT VT = Op.getValueType();
2351 return LowerFROUND32(Op, DAG);
2354 return LowerFROUND64(Op, DAG);
2370 EVT VT = Op.getValueType();
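// 0x80000000 masks the f32 sign bit and 0x3F000000 is the IEEE-754 bit
// pattern of 0.5f; combined they give 0.5 carrying the sign of the input,
// used to round halfway cases away from zero.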
2376 const unsigned SignBitMask = 0x80000000;
2379 const unsigned PointFiveInBits = 0x3F000000;
2380 SDValue PointFiveWithSignRaw =
2411 EVT VT = Op.getValueType();
2440 EVT VT = N->getValueType(0);
2464 if (Op.getValueType() == MVT::bf16) {
2468 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2480 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2483 Op.getOpcode(), Loc, Op.getValueType(),
2493 EVT NarrowVT = Op.getValueType();
2530 EVT WideVT = Op.getValueType();
2557 if (Op.getValueType() != MVT::v2i16)
2559 EVT EltVT = Op.getValueType().getVectorElementType();
2561 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2564 [&](const SDUse &O) {
2565 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2566 O.get(), DAG.getIntPtrConstant(I, DL));
2581 for (size_t I = 0; I < N->getNumOperands(); I++) {
2598 return Tcgen05StNode;
2606 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2610 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2611 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2612 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2613 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2614 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2615 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2616 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2617 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2618 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2619 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2620 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2621 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2622 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2623 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2624 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2625 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2626 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2627 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2628 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2629 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2630 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2631 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2632 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2633 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2634 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2635 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2636 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2637 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2638 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2639 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2640 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2641 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2642 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2643 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2644 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2645 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2646 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2656 if (N->getOperand(1).getValueType() != MVT::i128) {
2662 cast<ConstantSDNode>(N->getOperand(0).getNode())->getZExtValue();
2663 auto Opcode = [&]() {
2665 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2667 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2669 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2671 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2679 SDValue TryCancelResponse = N->getOperand(1);
2688 return DAG.getNode(Opcode, DL, N->getVTList(),
2689 {TryCancelResponse0, TryCancelResponse1});
2693 const unsigned Mode = [&]() {
2694 switch (Op->getConstantOperandVal(0)) {
2695 case Intrinsic::nvvm_prmt:
2697 case Intrinsic::nvvm_prmt_b4e:
2699 case Intrinsic::nvvm_prmt_ecl:
2701 case Intrinsic::nvvm_prmt_ecr:
2703 case Intrinsic::nvvm_prmt_f4e:
2705 case Intrinsic::nvvm_prmt_rc16:
2707 case Intrinsic::nvvm_prmt_rc8:
2721 switch (Op->getConstantOperandVal(0)) {
2724 case Intrinsic::nvvm_prmt:
2725 case Intrinsic::nvvm_prmt_b4e:
2726 case Intrinsic::nvvm_prmt_ecl:
2727 case Intrinsic::nvvm_prmt_ecr:
2728 case Intrinsic::nvvm_prmt_f4e:
2729 case Intrinsic::nvvm_prmt_rc16:
2730 case Intrinsic::nvvm_prmt_rc8:
2732 case Intrinsic::nvvm_internal_addrspace_wrap:
2733 return Op.getOperand(1);
2734 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2735 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2736 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2737 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2747 assert(V.getValueType() == MVT::i64 &&
2748 "Unexpected CTLZ/CTPOP type to legalize");
2757 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
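// A 64-bit funnel shift uses only the low six bits of the shift amount,
// hence the mask with 63 below.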
2759 const auto *AmtConst = dyn_cast<ConstantSDNode>(ShiftAmount);
2762 const auto Amt = AmtConst->getZExtValue() & 63;
2789 ? std::make_tuple(AHi, ALo, BHi)
2790 : std::make_tuple(ALo, BHi, BLo);
2817 EVT Ty = Op.getValueType();
2827 if (Flags.hasNoInfs())
2839 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
2849 TrueVal = TrueVal.getOperand(0);
2850 FalseVal = FalseVal.getOperand(0);
2852 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
2853 ? TrueVal.getValueType()
2854 : FalseVal.getValueType();
2874 switch (Op.getOpcode()) {
2880 return LowerADDRSPACECAST(Op, DAG);
2888 return LowerBUILD_VECTOR(Op, DAG);
2890 return LowerBITCAST(Op, DAG);
2894 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2896 return LowerINSERT_VECTOR_ELT(Op, DAG);
2898 return LowerVECTOR_SHUFFLE(Op, DAG);
2900 return LowerCONCAT_VECTORS(Op, DAG);
2905 return LowerVECREDUCE(Op, DAG);
2907 return LowerSTORE(Op, DAG);
2909 return LowerLOAD(Op, DAG);
2911 return LowerShiftLeftParts(Op, DAG);
2914 return LowerShiftRightParts(Op, DAG);
2918 return LowerFROUND(Op, DAG);
2920 return LowerFCOPYSIGN(Op, DAG);
2923 return LowerINT_TO_FP(Op, DAG);
2926 return LowerFP_TO_INT(Op, DAG);
2928 return LowerFP_ROUND(Op, DAG);
2930 return LowerFP_EXTEND(Op, DAG);
2932 return LowerBR_JT(Op, DAG);
2934 return LowerVAARG(Op, DAG);
2936 return LowerVASTART(Op, DAG);
2962 return LowerCopyToReg_128(Op, DAG);
2967 return PromoteBinOpIfF32FTZ(Op, DAG);
2982 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
2985 unsigned JId = JT->getIndex();
3017 unsigned SrcAS = N->getSrcAddressSpace();
3018 unsigned DestAS = N->getDestAddressSpace();
3028 const MVT GenerictVT =
3032 SDValue SharedClusterConversion =
3035 return SharedClusterConversion;
3051 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3052 EVT VT = Node->getValueType(0);
3094 SDValue VAReg = getParamSymbol(DAG, -1, PtrVT);
3096 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3097 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3102 static std::optional<std::pair<SDValue, SDValue>>
3105 const EVT ResVT = LD->getValueType(0);
3106 const EVT MemVT = LD->getMemoryVT();
3111 return std::nullopt;
3113 const auto NumEltsAndEltVT =
3115 if (!NumEltsAndEltVT)
3116 return std::nullopt;
3117 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
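// When the load's alignment is below the preferred alignment the routine may
// bail out (std::nullopt), leaving the under-aligned load to the default
// lowering.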
3119 Align Alignment = LD->getAlign();
3122 if (Alignment < PrefAlign) {
3128 return std::nullopt;
3139 return std::nullopt;
3151 ListVTs.push_back(MVT::Other);
3164 LD->getMemOperand());
3173 for (const unsigned I : llvm::seq(NumElts)) {
3178 for (const unsigned I : llvm::seq(NumElts)) {
3180 if (LoadEltVT != EltVT)
3188 const MVT BuildVecVT =
3200 Results.append({Res->first, Res->second});
3217 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3219 LD->getBasePtr(), LD->getPointerInfo(),
3220 MVT::i8, LD->getAlign(),
3221 LD->getMemOperand()->getFlags());
3232 if (Op.getValueType() == MVT::i1)
3239 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3240 "Unexpected fpext-load");
3242 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3243 LD->getMemOperand());
3255 const EVT MemVT = N->getMemoryVT();
3262 const auto NumEltsAndEltVT =
3264 if (!NumEltsAndEltVT)
3266 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3270 Align Alignment = N->getAlign();
3272 if (Alignment < PrefAlign) {
3309 for (const unsigned I : llvm::seq(NumElts)) {
3312 NumEltsPerSubVector);
3317 for (const unsigned I : llvm::seq(NumElts)) {
3332 Ops.append(N->op_begin() + 2, N->op_end());
3336 N->getMemoryVT(), N->getMemOperand());
3347 return LowerSTOREi1(Op, DAG);
3368 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3369 ST->getAlign(), ST->getMemOperand()->getFlags());
3378 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3379 "Custom lowering for 128-bit CopyToReg only");
3393 NewOps[0] = Op->getOperand(0);
3394 NewOps[1] = Op->getOperand(1);
3398 NewOps[4] = Op->getOperand(3);
3403 unsigned NVPTXTargetLowering::getNumRegisters(
3405 std::optional<MVT> RegisterVT = std::nullopt) const {
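// Keep an i128 value as a single part when the caller explicitly asks for an
// i128 register type; splitValueIntoRegisterParts below decides how it is
// actually materialized.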
3406 if (VT == MVT::i128 && RegisterVT == MVT::i128)
3411 bool NVPTXTargetLowering::splitValueIntoRegisterParts(
3413 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
3414 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
3462 for (const auto &Arg : F.args()) {
3463 const auto ArgIns = AllIns.take_while(
3464 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
3465 AllIns = AllIns.drop_front(ArgIns.size());
3467 Type *Ty = Arg.getType();
3472 if (Arg.use_empty()) {
3474 for (const auto &In : ArgIns) {
3475 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
3481 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
3487 if (Arg.hasByValAttr()) {
3495 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
3496 const auto &ByvalIn = ArgIns[0];
3498 "Ins type did not match function type");
3499 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
3504 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3507 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3516 assert(VTs.size() == ArgIns.size() && "Size mismatch");
3517 assert(VTs.size() == Offsets.size() && "Size mismatch");
3524 for (const unsigned NumElts : VI) {
3526 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
3534 DAG.getLoad(VecVT, dl, Root, VecAddr,
3538 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3539 for (const unsigned J : llvm::seq(NumElts)) {
3551 if (!OutChains.empty())
3566 if (RetTy->isVoidTy()) {
3567 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
3580 const bool ExtendIntegerRetVal =
3581 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
3586 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
3588 const auto GetRetVal = [&](unsigned I) -> SDValue {
3592 "OutVal type should always be legal");
3596 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
3602 for (const unsigned NumElts : VI) {
3603 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
3608 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
3625 if (Constraint.size() > 1)
3638 switch (Intrinsic) {
3641 case Intrinsic::nvvm_match_all_sync_i32p:
3642 case Intrinsic::nvvm_match_all_sync_i64p:
3647 Info.memVT = MVT::i1;
3652 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
3653 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
3654 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
3655 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
3656 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
3657 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
3658 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
3659 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
3660 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
3661 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
3662 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
3663 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
3664 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
3665 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
3666 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
3667 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
3668 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
3669 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
3670 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
3671 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
3672 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
3673 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
3674 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
3675 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
3677 Info.memVT = MVT::v8f16;
3678 Info.ptrVal = I.getArgOperand(0);
3684 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
3685 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
3686 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
3687 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
3688 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
3689 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
3690 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
3691 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
3692 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
3693 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
3694 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
3695 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
3696 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
3697 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
3698 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
3699 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
3700 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
3701 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
3702 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
3703 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
3704 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
3705 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
3706 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
3707 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
3709 Info.memVT = MVT::v2i32;
3710 Info.ptrVal = I.getArgOperand(0);
3717 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
3718 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
3719 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
3720 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
3721 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
3722 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
3723 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
3724 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
3725 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
3726 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
3727 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
3728 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
3729 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
3730 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
3731 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
3732 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
3734 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
3735 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
3736 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
3737 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
3738 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
3739 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
3740 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
3741 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
3742 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
3743 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
3744 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
3745 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
3746 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
3747 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
3748 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
3749 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
3750 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
3751 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
3752 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
3753 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
3754 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
3755 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
3756 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
3758 Info.memVT = MVT::v4i32;
3759 Info.ptrVal = I.getArgOperand(0);
3766 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
3767 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
3768 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
3769 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
3770 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
3771 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
3772 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
3773 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
3775 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
3776 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
3777 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
3778 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
3779 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
3780 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
3781 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
3782 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
3783 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
3784 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
3785 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
3786 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
3787 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
3788 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
3789 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
3790 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
3791 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
3792 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
3793 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
3794 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
3795 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
3796 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
3797 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
3798 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
3800 Info.memVT = MVT::i32;
3801 Info.ptrVal = I.getArgOperand(0);
3808 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
3809 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
3810 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
3811 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
3812 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
3813 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
3814 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
3815 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
3816 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
3817 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
3818 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
3819 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
3821 Info.memVT = MVT::v4f16;
3822 Info.ptrVal = I.getArgOperand(0);
3829 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
3830 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
3831 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
3832 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
3833 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
3834 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
3835 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
3836 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
3837 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
3838 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
3839 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
3840 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
3841 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
3842 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
3843 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
3844 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
3846 Info.memVT = MVT::v8f32;
3847 Info.ptrVal = I.getArgOperand(0);
3854 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
3855 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
3856 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
3857 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
3859 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
3860 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
3861 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
3862 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
3864 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
3865 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
3866 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
3867 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
3868 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
3869 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
3870 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
3871 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
3872 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
3873 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
3874 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
3875 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
3877 Info.memVT = MVT::v8i32;
3878 Info.ptrVal = I.getArgOperand(0);
3885 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
3886 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
3887 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
3888 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
3889 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
3890 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
3891 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
3892 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
3893 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
3894 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
3895 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
3896 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
3897 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
3898 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
3899 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
3901 Info.memVT = MVT::v2i32;
3902 Info.ptrVal = I.getArgOperand(0);
3909 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
3910 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
3911 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
3912 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
3914 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
3915 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
3916 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
3917 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
3919 Info.memVT = MVT::f64;
3920 Info.ptrVal = I.getArgOperand(0);
3927 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
3928 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
3929 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
3930 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
3932 Info.memVT = MVT::v2f64;
3933 Info.ptrVal = I.getArgOperand(0);
3940 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
3941 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
3942 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
3943 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
3944 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
3945 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
3946 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
3947 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
3948 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
3949 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
3950 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
3951 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
3953 Info.memVT = MVT::v4f16;
3954 Info.ptrVal = I.getArgOperand(0);
3961 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
3962 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
3963 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
3964 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
3965 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
3966 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
3967 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
3968 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
3969 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
3970 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
3971 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
3972 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
3973 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
3974 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
3975 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
3976 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
3978 Info.memVT = MVT::v8f32;
3979 Info.ptrVal = I.getArgOperand(0);
3986 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
3987 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
3988 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
3989 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
3990 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
3991 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
3992 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
3993 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
3994 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
3995 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
3996 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
3997 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
3999 Info.memVT = MVT::v8i32;
4000 Info.ptrVal = I.getArgOperand(0);
4007 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4008 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4009 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4010 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4011 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4012 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4013 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4014 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4015 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4016 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4017 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4019 Info.memVT = MVT::v2i32;
4020 Info.ptrVal = I.getArgOperand(0);
4027 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4028 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4029 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4030 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4032 Info.memVT = MVT::v2f64;
4033 Info.ptrVal = I.getArgOperand(0);
4040 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4041 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4042 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4044 Info.memVT = MVT::i32;
4045 Info.ptrVal = I.getArgOperand(0);
4052 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4053 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4054 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4056 Info.memVT = MVT::v4i32;
4057 Info.ptrVal = I.getArgOperand(0);
4064 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4065 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4066 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4067 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4068 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4069 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4070 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4071 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4072 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4073 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4074 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4075 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4076 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4077 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4078 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4079 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4080 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4081 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4082 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4083 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4084 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4085 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4086 auto &DL = I.getDataLayout();
4089 Info.ptrVal = I.getArgOperand(0);
4096 case Intrinsic::nvvm_prefetch_tensormap: {
4097 auto &DL = I.getDataLayout();
4100 Info.ptrVal = I.getArgOperand(0);
4108 case Intrinsic::nvvm_ldu_global_i:
4109 case Intrinsic::nvvm_ldu_global_f:
4110 case Intrinsic::nvvm_ldu_global_p: {
4113 Info.ptrVal = I.getArgOperand(0);
4116 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4120 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4121 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4122 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4123 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4124 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4125 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4126 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4127 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4128 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4129 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4130 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4131 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4132 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4133 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4134 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4135 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4136 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4137 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4138 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4139 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4140 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4141 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4142 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4143 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4144 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4145 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4146 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4147 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4148 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4149 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4150 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4151 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4152 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4153 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4154 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4155 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4156 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4157 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4158 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4159 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4160 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4161 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4162 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4163 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4164 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4165 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4166 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4167 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4168 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4169 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4170 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4171 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4172 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4173 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4174 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4175 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4176 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4177 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4179 Info.memVT = MVT::v4f32;
4180 Info.ptrVal = nullptr;
4186 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4187 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4188 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4189 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4190 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4191 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4192 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4193 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4194 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4195 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4196 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4197 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4198 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4199 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4200 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4201 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4202 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4203 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4204 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4205 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4206 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4207 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4208 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4209 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4210 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4211 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4212 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4213 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4214 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4215 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4216 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4217 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4218 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4219 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4220 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4221 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4222 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4223 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4224 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4225 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4226 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4227 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4228 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4229 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4230 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4231 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4232 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4233 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4234 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4235 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4236 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4237 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4238 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4239 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4240 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4241 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4242 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4243 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4244 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4245 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4246 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4247 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4248 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4249 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4250 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4251 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4252 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4253 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4254 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4255 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4256 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4257 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4258 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4259 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4260 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4261 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4262 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4263 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4264 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4265 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4266 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4267 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4268 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4269 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4270 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4271 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4272 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4273 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4274 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4275 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4276 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4277 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4278 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4279 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4280 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4281 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4282 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4283 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4284 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4285 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4286 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4287 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4288 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4289 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4290 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4291 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4292 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4293 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4294 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4295 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4296 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4297 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4298 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4299 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4300 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4301 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4303 Info.memVT = MVT::v4i32;
4304 Info.ptrVal = nullptr;
4310 case Intrinsic::nvvm_suld_1d_i8_clamp:
4311 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4312 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4313 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4314 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4315 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4316 case Intrinsic::nvvm_suld_2d_i8_clamp:
4317 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4318 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4319 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4320 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4321 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4322 case Intrinsic::nvvm_suld_3d_i8_clamp:
4323 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4324 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4325 case Intrinsic::nvvm_suld_1d_i8_trap:
4326 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4327 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4328 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4329 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4330 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4331 case Intrinsic::nvvm_suld_2d_i8_trap:
4332 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4333 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4334 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4335 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4336 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4337 case Intrinsic::nvvm_suld_3d_i8_trap:
4338 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4339 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4340 case Intrinsic::nvvm_suld_1d_i8_zero:
4341 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4342 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4343 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4344 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4345 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4346 case Intrinsic::nvvm_suld_2d_i8_zero:
4347 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4348 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4349 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4350 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4351 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4352 case Intrinsic::nvvm_suld_3d_i8_zero:
4353 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4354 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4356 Info.memVT = MVT::i8;
4357 Info.ptrVal = nullptr;
4363 case Intrinsic::nvvm_suld_1d_i16_clamp:
4364 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4365 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4366 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4367 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4368 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
4369 case Intrinsic::nvvm_suld_2d_i16_clamp:
4370 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
4371 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
4372 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
4373 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
4374 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
4375 case Intrinsic::nvvm_suld_3d_i16_clamp:
4376 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
4377 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
4378 case Intrinsic::nvvm_suld_1d_i16_trap:
4379 case Intrinsic::nvvm_suld_1d_v2i16_trap:
4380 case Intrinsic::nvvm_suld_1d_v4i16_trap:
4381 case Intrinsic::nvvm_suld_1d_array_i16_trap:
4382 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
4383 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
4384 case Intrinsic::nvvm_suld_2d_i16_trap:
4385 case Intrinsic::nvvm_suld_2d_v2i16_trap:
4386 case Intrinsic::nvvm_suld_2d_v4i16_trap:
4387 case Intrinsic::nvvm_suld_2d_array_i16_trap:
4388 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
4389 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
4390 case Intrinsic::nvvm_suld_3d_i16_trap:
4391 case Intrinsic::nvvm_suld_3d_v2i16_trap:
4392 case Intrinsic::nvvm_suld_3d_v4i16_trap:
4393 case Intrinsic::nvvm_suld_1d_i16_zero:
4394 case Intrinsic::nvvm_suld_1d_v2i16_zero:
4395 case Intrinsic::nvvm_suld_1d_v4i16_zero:
4396 case Intrinsic::nvvm_suld_1d_array_i16_zero:
4397 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
4398 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
4399 case Intrinsic::nvvm_suld_2d_i16_zero:
4400 case Intrinsic::nvvm_suld_2d_v2i16_zero:
4401 case Intrinsic::nvvm_suld_2d_v4i16_zero:
4402 case Intrinsic::nvvm_suld_2d_array_i16_zero:
4403 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
4404 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
4405 case Intrinsic::nvvm_suld_3d_i16_zero:
4406 case Intrinsic::nvvm_suld_3d_v2i16_zero:
4407 case Intrinsic::nvvm_suld_3d_v4i16_zero:
4409 Info.memVT = MVT::i16;
4410 Info.ptrVal = nullptr;
4416 case Intrinsic::nvvm_suld_1d_i32_clamp:
4417 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
4418 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
4419 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
4420 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
4421 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
4422 case Intrinsic::nvvm_suld_2d_i32_clamp:
4423 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
4424 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
4425 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
4426 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
4427 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
4428 case Intrinsic::nvvm_suld_3d_i32_clamp:
4429 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
4430 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
4431 case Intrinsic::nvvm_suld_1d_i32_trap:
4432 case Intrinsic::nvvm_suld_1d_v2i32_trap:
4433 case Intrinsic::nvvm_suld_1d_v4i32_trap:
4434 case Intrinsic::nvvm_suld_1d_array_i32_trap:
4435 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
4436 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
4437 case Intrinsic::nvvm_suld_2d_i32_trap:
4438 case Intrinsic::nvvm_suld_2d_v2i32_trap:
4439 case Intrinsic::nvvm_suld_2d_v4i32_trap:
4440 case Intrinsic::nvvm_suld_2d_array_i32_trap:
4441 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
4442 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
4443 case Intrinsic::nvvm_suld_3d_i32_trap:
4444 case Intrinsic::nvvm_suld_3d_v2i32_trap:
4445 case Intrinsic::nvvm_suld_3d_v4i32_trap:
4446 case Intrinsic::nvvm_suld_1d_i32_zero:
4447 case Intrinsic::nvvm_suld_1d_v2i32_zero:
4448 case Intrinsic::nvvm_suld_1d_v4i32_zero:
4449 case Intrinsic::nvvm_suld_1d_array_i32_zero:
4450 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
4451 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
4452 case Intrinsic::nvvm_suld_2d_i32_zero:
4453 case Intrinsic::nvvm_suld_2d_v2i32_zero:
4454 case Intrinsic::nvvm_suld_2d_v4i32_zero:
4455 case Intrinsic::nvvm_suld_2d_array_i32_zero:
4456 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
4457 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
4458 case Intrinsic::nvvm_suld_3d_i32_zero:
4459 case Intrinsic::nvvm_suld_3d_v2i32_zero:
4460 case Intrinsic::nvvm_suld_3d_v4i32_zero:
4462 Info.memVT = MVT::i32;
4463 Info.ptrVal = nullptr;
4469 case Intrinsic::nvvm_suld_1d_i64_clamp:
4470 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
4471 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
4472 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
4473 case Intrinsic::nvvm_suld_2d_i64_clamp:
4474 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
4475 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
4476 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
4477 case Intrinsic::nvvm_suld_3d_i64_clamp:
4478 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
4479 case Intrinsic::nvvm_suld_1d_i64_trap:
4480 case Intrinsic::nvvm_suld_1d_v2i64_trap:
4481 case Intrinsic::nvvm_suld_1d_array_i64_trap:
4482 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
4483 case Intrinsic::nvvm_suld_2d_i64_trap:
4484 case Intrinsic::nvvm_suld_2d_v2i64_trap:
4485 case Intrinsic::nvvm_suld_2d_array_i64_trap:
4486 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
4487 case Intrinsic::nvvm_suld_3d_i64_trap:
4488 case Intrinsic::nvvm_suld_3d_v2i64_trap:
4489 case Intrinsic::nvvm_suld_1d_i64_zero:
4490 case Intrinsic::nvvm_suld_1d_v2i64_zero:
4491 case Intrinsic::nvvm_suld_1d_array_i64_zero:
4492 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
4493 case Intrinsic::nvvm_suld_2d_i64_zero:
4494 case Intrinsic::nvvm_suld_2d_v2i64_zero:
4495 case Intrinsic::nvvm_suld_2d_array_i64_zero:
4496 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
4497 case Intrinsic::nvvm_suld_3d_i64_zero:
4498 case Intrinsic::nvvm_suld_3d_v2i64_zero:
4500 Info.memVT = MVT::i64;
4501 Info.ptrVal = nullptr;
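// The surface-load (suld) case groups above report only the scalar element
// width (i8, i16, i32 or i64) in Info.memVT; the v2/v4 vector variants share
// the same cases, and ptrVal is left null.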
4507 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
4508 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
4509 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
4511 Info.memVT = MVT::v1i32;
4512 Info.ptrVal = I.getArgOperand(0);
4519 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
4520 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
4521 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
4522 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {
4524 Info.memVT = MVT::v2i32;
4525 Info.ptrVal = I.getArgOperand(0);
4532 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
4533 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
4534 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
4535 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
4536 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {
4538 Info.memVT = MVT::v4i32;
4539 Info.ptrVal = I.getArgOperand(0);
4546 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
4547 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
4548 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
4549 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
4550 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {
4552 Info.memVT = MVT::v8i32;
4553 Info.ptrVal = I.getArgOperand(0);
4560 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
4561 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
4562 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
4563 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
4564 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {
4566 Info.memVT = MVT::v16i32;
4567 Info.ptrVal = I.getArgOperand(0);
4574 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
4575 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
4576 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
4577 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
4578 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {
4580 Info.memVT = MVT::v32i32;
4581 Info.ptrVal = I.getArgOperand(0);
4588 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
4589 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
4590 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
4591 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
4592 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {
4594 Info.memVT = MVT::v64i32;
4595 Info.ptrVal = I.getArgOperand(0);
4602 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
4603 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
4604 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
4605 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
4606 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {
4608 Info.memVT = MVT::v128i32;
4609 Info.ptrVal = I.getArgOperand(0);
4616 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
4617 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
4618 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
4620 Info.memVT = MVT::i32;
4621 Info.ptrVal = I.getArgOperand(0);
4628 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
4629 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
4630 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
4631 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
4633 Info.memVT = MVT::v2i32;
4634 Info.ptrVal = I.getArgOperand(0);
4641 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
4642 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
4643 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
4644 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
4645 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
4647 Info.memVT = MVT::v4i32;
4648 Info.ptrVal = I.getArgOperand(0);
4655 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
4656 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
4657 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
4658 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
4659 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
4661 Info.memVT = MVT::v8i32;
4662 Info.ptrVal = I.getArgOperand(0);
4669 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
4670 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
4671 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
4672 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
4673 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
4675 Info.memVT = MVT::v16i32;
4676 Info.ptrVal = I.getArgOperand(0);
4683 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
4684 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
4685 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
4686 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
4687 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
4689 Info.memVT = MVT::v32i32;
4690 Info.ptrVal = I.getArgOperand(0);
4697 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
4698 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
4699 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
4700 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
4701 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
4703 Info.memVT = MVT::v64i32;
4704 Info.ptrVal = I.getArgOperand(0);
4711 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
4712 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
4713 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
4714 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
4715 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
4717 Info.memVT = MVT::v128i32;
4718 Info.ptrVal = I.getArgOperand(0);
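// For the tcgen05.ld/tcgen05.st case groups above, Info.memVT scales with the
// _xN suffix (i32/v1i32 up to v128i32), so the reported access size tracks the
// number of 32-bit registers moved, and ptrVal is taken from the first
// intrinsic argument.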
4739 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
4744 if (!F || !F->hasLocalLinkage() ||
4745     F->hasAddressTaken(nullptr,
4749   return ABITypeAlign;
4752 return std::max(Align(16), ABITypeAlign);
4759 Align ArgAlign = InitialAlign;
4774 ArgAlign = std::max(ArgAlign, Align(4));
4784 std::string ParamName;
4789 ParamStr << "_vararg";
4791 ParamStr << "_param_" << Idx;
4843 if (Constraint.size() == 1) {
4844 switch (Constraint[0]) {
4863 std::pair<unsigned, const TargetRegisterClass *>
4867 if (Constraint.size() == 1) {
4868 switch (Constraint[0]) {
4870 return std::make_pair(0U, &NVPTX::B1RegClass);
4873 return std::make_pair(0U, &NVPTX::B16RegClass);
4876 return std::make_pair(0U, &NVPTX::B32RegClass);
4880 return std::make_pair(0U, &NVPTX::B64RegClass);
4884 "supported for sm_70 and higher!");
4885 return std::make_pair(0U, &NVPTX::B128RegClass);
4914 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
4915 return Const && Const->getZExtValue() == 0;
4947 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
4955 ((ZeroOpNum == 1) ? N1 : MAD),
4956 ((ZeroOpNum == 1) ? MAD : N1));
4971 (N->getFlags().hasAllowContract() &&
4984 int nonAddCount = 0;
4993 int orderNo = N->getIROrder();
4999 if (orderNo - orderNo2 < 500)
5005 bool opIsLive = false;
5009 if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
5014 int orderNo3 = User->getIROrder();
5015 if (orderNo3 > orderNo) {
5023 int orderNo3 = User->getIROrder();
5024 if (orderNo3 > orderNo) {
5059 EVT ElementVT = N->getValueType(0);
5070 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5072 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5073 if (N->getOpcode() != ISD::LOAD)
5090 return !U.getUser()->use_empty();
5098 auto *LD = cast<MemSDNode>(N);
5104 unsigned OldNumOutputs;
5105 switch (LD->getOpcode()) {
5112 Operands.push_back(DCI.DAG.getIntPtrConstant(
5113     cast<LoadSDNode>(LD)->getExtensionType(), DL));
5122 if (ElementVT != MVT::v2f32)
5133 const unsigned NewNumOutputs = OldNumOutputs * 2;
5136 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5139 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5140     Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5141     LD->getMemOperand());
5147 for (unsigned I : seq(OldNumOutputs))
5148   Results.push_back(DCI.DAG.getBuildVector(
5149       ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5154 return DCI.DAG.getMergeValues(Results, DL);
5169 unsigned Front, unsigned Back) {
5176 EVT ElementVT = N->getOperand(Front).getValueType();
5182 auto *ST = cast<MemSDNode>(N);
5186 switch (N->getOpcode()) {
5199 if (ElementVT != MVT::v2f32)
5213 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5219 if (!BV.hasOneUse())
5227 Op = Op.getOperand(0);
5231 Op->getOperand(0).getValueType() == MVT::i32)
5238 Operands.append({BV.getOperand(0), BV.getOperand(1)});
5240 Operands.append(N->op_end() - Back, N->op_end());
5244 ST->getMemoryVT(), ST->getMemOperand());
5255 if (!ST->getValue().getValueType().isSimple())
5268 if (!N->getValueType(0).isSimple())
5288 if (VT.isVector() || VT != MVT::i32)
5308 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
5330 EVT VT = N->getValueType(0);
5334 const SDValue &Num = N->getOperand(0);
5335 const SDValue &Den = N->getOperand(1);
5338 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
5339 U->getOperand(1) == Den) {
5357 if (!Op.hasOneUse())
5359 EVT ToVT = N->getValueType(0);
5360 EVT FromVT = Op.getValueType();
5361 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
5362      (ToVT == MVT::i64 && FromVT == MVT::i32)))
5365 (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
5369 unsigned ExtOpcode = N->getOpcode();
5370 unsigned Opcode = 0;
5379 const auto ShiftAmt = Op.getConstantOperandVal(1);
5402 EVT OrigVT = Op.getOperand(0).getValueType();
5408 EVT OrigVT = Op.getOperand(0).getValueType();
5435 IsSigned = (LHSSign == Signed);
5439 const APInt &Val = CI->getAPIntValue();
5441 return Val.isIntN(OptSize);
5450 return LHSSign == RHSSign;
5460 EVT MulType = N->getValueType(0);
5461 if (MulType != MVT::i32 && MulType != MVT::i64) {
5472 if (isa<ConstantSDNode>(LHS)) {
5501 if (MulType == MVT::i32) {
5502 DemotedVT = MVT::i16;
5504 DemotedVT = MVT::i32;
5525 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5526 return Const && Const->getZExtValue() == 1;
5534 return Add->getOperand(1);
5537 return Add->getOperand(0);
5578 (ConstOpNo == 1) ? X : NewMul,
5579 (ConstOpNo == 1) ? NewMul : X);
5590 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
5640 unsigned int SmVersion) {
5641 EVT CCType = N->getValueType(0);
5645 EVT AType = A.getValueType();
5646 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
5649 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
5660 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
5688 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
5693 if (!Index || Index->getZExtValue() == 0)
5708 if (EltVT != EltIVT)
5711 if (EltVT != N->getValueType(0))
5721 if (VectorVT != MVT::v4i8)
5732 for (int I = 0; I < 4; ++I) {
5751 auto VT = N->getValueType(0);
5758 auto Op0 = N->getOperand(0);
5759 auto Op1 = N->getOperand(1);
5766 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
5772 for (auto &[Op, OpBytes] : OpData) {
5775 *Op = Op->getOperand(0);
5778 Op->getOperand(0).getValueType() == MVT::i32))
5783 if (!Op->hasOneUse())
5786 *Op = Op->getOperand(0);
5790 if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
5791 if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
5794 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
5795        "PRMT selector values out of range");
5797 *Op = Op->getOperand(0);
5803 auto &DAG = DCI.DAG;
5807 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
5813 auto *ASCN1 = cast<AddrSpaceCastSDNode>(N);
5815 if (auto *ASCN2 = dyn_cast<AddrSpaceCastSDNode>(ASCN1->getOperand(0))) {
5816 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
5819 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
5820 return ASCN2->getOperand(0);
5838 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
5840   return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
5845 return GetSelector(V, V + 1, V + 2, V + 3);
5847 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
5849 return GetSelector(V, V, V, V);
5851 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
5853 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
5855 unsigned V1 = (V & 1) << 1;
5856 return GetSelector(V1, V1 + 1, V1, V1 + 1);
5864 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
5865        Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
5869 APInt Result(32, 0);
5874 APInt Byte = BitField.extractBits(8, Idx * 8);
5876 Byte = Byte.ashr(8);
5877 Result.insertBits(Byte, I * 8);
5888 if (isa<ConstantSDNode>(N->getOperand(0)) &&
5889     isa<ConstantSDNode>(N->getOperand(1)) &&
5890     isa<ConstantSDNode>(N->getOperand(2)))
5892     N->getConstantOperandAPInt(1),
5893     N->getConstantOperandAPInt(2),
5894     N->getConstantOperandVal(3)),
5895     SDLoc(N), N->getValueType(0));
5910 switch (R.getOpcode()) {
5942 for (auto &Op : R->ops()) {
5956 R.getValueType(), V, R.getOperand(1));
5981 DAGCombinerInfo &DCI) const {
5983 switch (N->getOpcode()) {
6031 EVT ToVT = Op->getValueType(0);
6032 if (ToVT != MVT::v2i8) {
6051 bool hasOffset = false) {
6053 EVT ResVT = N->getValueType(0);
6061 for (unsigned i = 0; i < NumElts; ++i)
6072 Ops.push_back(N->getOperand(3));
6073 Ops.push_back(N->getOperand(4));
6075 Ops.push_back(N->getOperand(3));
6084 for (unsigned i = 0; i < NumElts; ++i) {
6091 Results.push_back(BuildVector);
6106 case Intrinsic::nvvm_ldu_global_i:
6107 case Intrinsic::nvvm_ldu_global_f:
6108 case Intrinsic::nvvm_ldu_global_p: {
6109 EVT ResVT = N->getValueType(0);
6121 bool NeedTrunc = false;
6127 unsigned Opcode = 0;
6135 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6139 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6152 OtherOps.append(N->op_begin() + 2, N->op_end());
6162 for (unsigned i = 0; i < NumElts; ++i) {
6180 "Custom handling of non-i8 ldu/ldg?");
6203 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
6204 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
6205 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
6206 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
6207 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
6208 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
6209 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
6210 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
6211 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
6212 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
6213 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
6214 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
6215 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
6216 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
6217 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
6218 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
6219 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
6220 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
6221 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
6222 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
6223 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
6224 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
6225 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
6226 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
6227 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
6228 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
6229 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
6232 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
6233 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
6234 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
6235 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
6236 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
6237 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
6238 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
6252 assert(Reg.getValueType() == MVT::i128 &&
6253 "Custom lowering for CopyFromReg with 128-bit reg only");
6255 N->getValueType(2)};
6286 assert(N->getValueType(0) == MVT::i128 &&
6287        "Custom lowering for atomic128 only supports i128");
6295 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
6296 "requires target sm_90.",
6307 for (const auto &Op : AN->ops().drop_front(2)) {
6322 {Result.getValue(0), Result.getValue(1)}));
6323 Results.push_back(Result.getValue(2));
6326void NVPTXTargetLowering::ReplaceNodeResults(
6328 switch (N->getOpcode()) {
6374 const unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
6442 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
6452 (cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() <
6459 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
6460 bool BitwidthSupportedAndIsSeqCst =
6462 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
6471 if (!isa<AtomicCmpXchgInst>(Inst))
6476 SyncScope::ID SSID = cast<AtomicCmpXchgInst>(Inst)->getSyncScopeID();
6490 if (!isa<AtomicCmpXchgInst>(Inst))
6493 auto *CI = cast<AtomicCmpXchgInst>(Inst);
6495 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth();
6523 case ISD::VP_FP_TO_UINT:
6525 return ISD::VP_FP_TO_SINT;
6546 unsigned Mode = Op.getConstantOperandVal(3);
6556 "PRMT must have i32 operands");
6576 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
6581 auto DestVT = LD->getValueType(0);
6582 if (DestVT.isVector())
6595 switch (Op.getOpcode()) {
6622 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
6623 unsigned ByteStart = (Idx % 4) * 8;
6625 Src.setBit(ByteStart + 7);
6627 Src.setBits(ByteStart, ByteStart + 8);
6630 return {DemandedLHS, DemandedRHS};
6649 SDValue Op0 = PRMT.getOperand(0);
6650 SDValue Op1 = PRMT.getOperand(1);
6651 auto *SelectorConst = dyn_cast<ConstantSDNode>(PRMT.getOperand(2));
6655 unsigned Mode = PRMT.getConstantOperandVal(3);
6660 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
6661 const unsigned SelBits = (4 - LeadingBytes) * 4;
6677 if ((DemandedOp0 && DemandedOp0 != Op0) ||
6678 (DemandedOp1 && DemandedOp1 != Op1)) {
6679 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
6680 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
6692 switch (Op.getOpcode()) {
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
mir Rename Register Operands
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Register const TargetRegisterInfo * TRI
NVPTX address space definition.
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressure"), cl::init(false))
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static void ReplaceTcgen05Ld(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results, bool hasOffset=false)
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
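The constant folding done here follows the prmt.b32 byte-selection rule, which is easiest to picture with a small standalone function. The sketch below is an illustration covering only the default mode, and the helper name is invented for this example: each 4-bit selector nibble picks one of the eight bytes of {B, A}, and when bit 3 of the nibble is set the sign bit of the selected byte is replicated across the whole output byte.

#include <cstdint>

// Hypothetical stand-in for prmt.b32 in its default mode (illustration only).
static uint32_t prmtDefault(uint32_t A, uint32_t B, uint16_t Selector) {
  const uint64_t BitField = (static_cast<uint64_t>(B) << 32) | A;
  uint32_t Result = 0;
  for (unsigned I = 0; I < 4; ++I) {
    const unsigned Nibble = (Selector >> (I * 4)) & 0xF;
    uint8_t Byte = (BitField >> ((Nibble & 0x7) * 8)) & 0xFF;
    if (Nibble & 0x8) // msb-replicate flag: smear the sign bit of the byte
      Byte = (Byte & 0x80) ? 0xFF : 0x00;
    Result |= static_cast<uint32_t>(Byte) << (I * 8);
  }
  return Result;
}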
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< NVPTXISD::NodeType > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
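expandFSH64 presumably assembles the 64-bit funnel shift out of simpler operations; as a reminder of what the operation itself computes, here is a plain 32-bit funnel-shift-left written directly. This is a standalone illustration of the semantics, not the DAG expansion.

#include <cstdint>

// fshl(Hi, Lo, Amt): shift the concatenation {Hi, Lo} left by Amt and keep
// the upper 32 bits.
static uint32_t fshl32(uint32_t Hi, uint32_t Lo, unsigned Amt) {
  Amt &= 31;
  if (Amt == 0) // avoid the out-of-range shift by 32 below
    return Hi;
  return (Hi << Amt) | (Lo >> (32 - Amt));
}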
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
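The rewrite rests on a simple arithmetic fact: when both operands of an M-bit multiply are really sign- or zero-extended M/2-bit values, the full product is exactly what a widening M/2 x M/2 -> M multiply produces, which PTX exposes as mul.wide. A minimal numeric sketch of the signed 64-bit case, using invented helper names:

#include <cassert>
#include <cstdint>

static bool FitsInI32(int64_t V) { return V == static_cast<int32_t>(V); }

// For demotable operands, the 64-bit product can be formed as a widening
// 32x32->64 multiply, the shape mul.wide.s32 implements.
static int64_t MulWideS32(int64_t A, int64_t B) {
  assert(FitsInI32(A) && FitsInI32(B) && "operands must be demotable");
  return static_cast<int64_t>(static_cast<int32_t>(A)) *
         static_cast<int64_t>(static_cast<int32_t>(B));
}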
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05St(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
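One way to read this helper is as a pairwise tree: neighbouring elements are combined level by level until a single value is left. The standalone sketch below shows that shape with ordinary values and an invented name; it is an illustration of the structure, not the DAG-building code, and it assumes the input is non-empty.

#include <cstddef>
#include <utility>
#include <vector>

template <typename T, typename BinOp>
T TreeReduce(std::vector<T> Elems, BinOp Op) {
  while (Elems.size() > 1) {
    std::vector<T> Next;
    for (std::size_t I = 0; I + 1 < Elems.size(); I += 2)
      Next.push_back(Op(Elems[I], Elems[I + 1]));
    if (Elems.size() % 2) // an odd leftover element is carried up unchanged
      Next.push_back(Elems.back());
    Elems = std::move(Next);
  }
  return Elems.front();
}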
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
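A rough model of the idea, with an invented helper and a rounding rule assumed for illustration only (the routine itself is authoritative): a scalar integer is widened to the next parameter-friendly width, and anything already at or above 64 bits is left to generic handling.

// Hypothetical rounding rule, assumed for illustration; see
// promoteScalarIntegerPTX for the real behaviour.
static unsigned PromotedIntegerWidth(unsigned Bits) {
  for (unsigned W : {8u, 16u, 32u, 64u})
    if (Bits <= W)
      return W;
  return Bits; // widths above 64 bits are not promoted by this sketch
}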
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
MachineInstr unsigned OpIdx
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool slt(const APInt &RHS) const
Signed less than comparison.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Common base class shared among various IRBuilders.
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
Instances of this class represent a uniqued identifier for a section in the current translation unit.
StringRef getName() const
getName - Get the symbol name.
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
unsigned getMaxRequiredAlignment() const
bool hasAtomMinMax64() const
bool hasAtomAddF64() const
const NVPTXTargetLowering * getTargetLowering() const override
unsigned getMinCmpXchgSizeInBits() const
unsigned getPTXVersion() const
bool hasNativeBF16Support(int Opcode) const
const NVPTXRegisterInfo * getRegisterInfo() const override
unsigned int getSmVersion() const
bool hasAtomBitwise64() const
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool allowFP16Math() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
UniqueStringSaver & getStrPool() const
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
~NVPTXTargetObjectFile() override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
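A minimal sketch (a hypothetical target, not this file's actual configuration) of how the protected TargetLoweringBase setters above are typically combined in a TargetLowering subclass constructor.

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

namespace {
// Hypothetical subclass used only to illustrate the setter calls.
class ExampleTLI : public TargetLowering {
public:
  ExampleTLI(const TargetMachine &TM, const TargetRegisterClass *GPR32RC,
             const TargetRegisterInfo *TRI)
      : TargetLowering(TM) {
    addRegisterClass(MVT::i32, GPR32RC);                 // i32 values live in this class
    setOperationAction(ISD::SDIV, MVT::i32, Expand);     // no native divide: expand it
    setTruncStoreAction(MVT::i32, MVT::i8, Expand);      // no truncating i32->i8 store
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Expand); // no zext-loading i8 into i32
    setBooleanContents(ZeroOrNegativeOneBooleanContent); // compares produce 0 / -1
    computeRegisterProperties(TRI);                      // derive register/type tables last
  }
};
} // namespace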
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
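A minimal sketch of querying the action table once it is built; the function name is illustrative.

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// True when the target declared FMA Legal for VT; isOperationLegal is a
// shorthand for a type-legality check plus getOperationAction(...) == Legal.
static bool hasNativeFMA(const TargetLoweringBase &TLI, EVT VT) {
  return TLI.isOperationLegal(ISD::FMA, VT);
}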
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
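A minimal sketch (a hypothetical override, not this target's actual constraint letters) of classifying a single-letter inline-asm constraint the way getConstraintType is usually implemented.

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Hypothetical classification: 'r' names a register class, 'i' an immediate.
static TargetLowering::ConstraintType classifyConstraint(StringRef Constraint) {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return TargetLowering::C_RegisterClass;
    case 'i':
      return TargetLowering::C_Immediate;
    default:
      break;
    }
  }
  return TargetLowering::C_Unknown;
}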
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
StringRef save(const char *S)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
int getNumOccurrences() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum, which are the same as FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ADDRESS_SPACE_SHARED_CLUSTER
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED
@ CALL
This node represents a PTX call instruction.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X
@ UNPACK_VECTOR
This node is the inverse of NVPTXISD::BUILD_VECTOR.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y
@ DeclareScalarParam
These nodes represent a parameter declaration.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
@ BUILD_VECTOR
This node is similar to ISD::BUILD_VECTOR except that the output may be implicitly bitcast to a scala...
bool isPackedVectorTy(EVT VT)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
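A minimal sketch of the ComputeValueVTs overload declared above, splitting an aggregate IR type into the EVTs and byte offsets of its scalar leaves; the function name is illustrative.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static void splitAggregate(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty) {
  SmallVector<EVT, 8> ValueVTs;     // one EVT per scalar leaf of Ty
  SmallVector<TypeSize, 8> Offsets; // starting byte offset of each leaf
  ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, &Offsets);
}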
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
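A minimal sketch of the alignment helpers listed above (PowerOf2Ceil, alignTo, commonAlignment); the concrete values are arbitrary.

#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static void alignmentDemo() {
  Align A(16);
  uint64_t Padded = alignTo(100, A);      // 112: next multiple of 16 at or above 100
  Align AtOffset = commonAlignment(A, 8); // 8: alignment of a 16-aligned base plus 8 bytes
  uint64_t Pow2 = PowerOf2Ceil(100);      // 128: next power of two at or above 100
  (void)Padded; (void)AtOffset; (void)Pow2;
}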
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
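A minimal sketch of the range helpers above (zip, enumerate, seq); the data is arbitrary.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include <vector>
using namespace llvm;

static void rangeDemo() {
  std::vector<int> A = {1, 2, 3}, B = {4, 5, 6};
  for (auto [X, Y] : zip(A, B))        // visits (1,4), (2,5), (3,6)
    (void)(X + Y);
  for (auto [Idx, Val] : enumerate(A)) // visits (0,1), (1,2), (2,3)
    (void)(Idx + Val);
  for (unsigned I : seq(0u, 4u))       // 0, 1, 2, 3; the end value is excluded
    (void)I;
}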
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
ElementCount getVectorElementCount() const
bool is32BitVector() const
Return true if this is a 32-bit vector type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
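A minimal sketch of the EVT queries above; the widening policy is illustrative, not this file's.

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// For a vector VT, return the bit width of a vector with twice the lanes;
// for a scalar VT, return its own fixed bit width.
static uint64_t doubledWidthInBits(LLVMContext &Ctx, EVT VT) {
  if (VT.isVector()) {
    EVT EltVT = VT.getVectorElementType();
    unsigned NumElts = VT.getVectorNumElements();
    EVT Wider = EVT::getVectorVT(Ctx, EltVT, NumElts * 2);
    return Wider.getFixedSizeInBits();
  }
  return VT.getFixedSizeInBits();
}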
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
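A minimal sketch of the KnownBits helpers above (concat, insertBits, getBitWidth); the bit widths are arbitrary.

#include "llvm/Support/KnownBits.h"
using namespace llvm;

static KnownBits knownBitsDemo() {
  KnownBits Hi(16), Lo(16);
  Lo.setAllZero();                 // the low half is known to be all zeros
  KnownBits Full = Hi.concat(Lo);  // 32 bits: unknown high half over a zero low half
  KnownBits Wide(64);
  Wide.insertBits(Full, 0);        // place those 32 known bits at bit position 0
  return Wide;                     // Wide.getBitWidth() == 64
}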
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
bool isAfterLegalizeDAG() const
bool isBeforeLegalize() const
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)