34#include "llvm/IR/IntrinsicsWebAssembly.h"
41#define DEBUG_TYPE "wasm-lower"
46 auto MVTPtr = Subtarget->
hasAddr64() ? MVT::i64 : MVT::i32;
56 Subtarget->
hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
85 for (
auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
90 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
103 for (
auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
124 for (
auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
160 for (
auto T : {MVT::i32, MVT::i64})
163 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
176 for (
auto T : {MVT::i32, MVT::i64})
211 for (
auto T : {MVT::v16i8, MVT::v8i16})
215 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
219 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
227 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
232 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
238 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
243 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
251 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
258 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
263 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
273 for (
auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
279 for (
auto T : {MVT::v4f32, MVT::v2f64})
289 for (
auto T : {MVT::v2i64, MVT::v2f64})
313 for (
auto T : {MVT::i8, MVT::i16, MVT::i32})
329 for (
auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
347 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
350 if (
MVT(
T) != MemT) {
388 setLibcallName(RTLIB::RETURN_ADDRESS,
"emscripten_return_address");
399 return MVT::externref;
408 return MVT::externref;
414bool WebAssemblyTargetLowering::shouldExpandPartialReductionIntrinsic(
416 if (
I->getIntrinsicID() != Intrinsic::experimental_vector_partial_reduce_add)
420 auto Op1 =
I->getOperand(1);
422 if (
auto *InputInst = dyn_cast<Instruction>(Op1)) {
426 if (isa<Instruction>(InputInst->getOperand(0)) &&
427 isa<Instruction>(InputInst->getOperand(1))) {
429 if (cast<Instruction>(InputInst->getOperand(0))->getOpcode() !=
430 cast<Instruction>(InputInst->getOperand(1))->getOpcode())
444WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(
AtomicRMWInst *AI)
const {
460bool WebAssemblyTargetLowering::shouldScalarizeBinop(
SDValue VecOp)
const {
480FastISel *WebAssemblyTargetLowering::createFastISel(
485MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(
const DataLayout & ,
496 "32-bit shift counts ought to be enough for anyone");
501 "Unable to represent scalar shift amount type");
511 bool IsUnsigned,
bool Int64,
512 bool Float64,
unsigned LoweredOpcode) {
518 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
519 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
520 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
521 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
522 unsigned IConst =
Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
523 unsigned Eqz = WebAssembly::EQZ_I32;
524 unsigned And = WebAssembly::AND_I32;
526 int64_t Substitute = IsUnsigned ? 0 : Limit;
527 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(
double)Limit;
538 F->insert(It, FalseMBB);
539 F->insert(It, TrueMBB);
540 F->insert(It, DoneMBB);
543 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
551 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
552 Tmp0 =
MRI.createVirtualRegister(
MRI.getRegClass(InReg));
553 Tmp1 =
MRI.createVirtualRegister(
MRI.getRegClass(InReg));
554 CmpReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
555 EqzReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
556 FalseReg =
MRI.createVirtualRegister(
MRI.getRegClass(OutReg));
557 TrueReg =
MRI.createVirtualRegister(
MRI.getRegClass(OutReg));
559 MI.eraseFromParent();
568 .
addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
573 Tmp1 =
MRI.createVirtualRegister(
MRI.getRegClass(InReg));
575 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
576 Register AndReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
578 .
addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
620 unsigned Eqz =
Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
621 unsigned MemoryCopy =
622 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
633 F->insert(It, TrueMBB);
634 F->insert(It, DoneMBB);
637 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
647 EqzReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
650 MI.eraseFromParent();
688 unsigned Eqz =
Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
689 unsigned MemoryFill =
690 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
701 F->insert(It, TrueMBB);
702 F->insert(It, DoneMBB);
705 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
715 EqzReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
718 MI.eraseFromParent();
740 CallResults.
getOpcode() == WebAssembly::RET_CALL_RESULTS);
744 bool IsRetCall = CallResults.
getOpcode() == WebAssembly::RET_CALL_RESULTS;
746 bool IsFuncrefCall =
false;
752 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
757 if (IsIndirect && IsRetCall) {
758 CallOp = WebAssembly::RET_CALL_INDIRECT;
759 }
else if (IsIndirect) {
760 CallOp = WebAssembly::CALL_INDIRECT;
761 }
else if (IsRetCall) {
762 CallOp = WebAssembly::RET_CALL;
764 CallOp = WebAssembly::CALL;
793 for (
auto Def : CallResults.
defs())
816 for (
auto Use : CallParams.
uses())
832 if (IsIndirect && IsFuncrefCall) {
844 BuildMI(MF,
DL,
TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
848 BuildMI(MF,
DL,
TII.get(WebAssembly::TABLE_SET_FUNCREF))
863 switch (
MI.getOpcode()) {
866 case WebAssembly::FP_TO_SINT_I32_F32:
868 WebAssembly::I32_TRUNC_S_F32);
869 case WebAssembly::FP_TO_UINT_I32_F32:
871 WebAssembly::I32_TRUNC_U_F32);
872 case WebAssembly::FP_TO_SINT_I64_F32:
874 WebAssembly::I64_TRUNC_S_F32);
875 case WebAssembly::FP_TO_UINT_I64_F32:
877 WebAssembly::I64_TRUNC_U_F32);
878 case WebAssembly::FP_TO_SINT_I32_F64:
880 WebAssembly::I32_TRUNC_S_F64);
881 case WebAssembly::FP_TO_UINT_I32_F64:
883 WebAssembly::I32_TRUNC_U_F64);
884 case WebAssembly::FP_TO_SINT_I64_F64:
886 WebAssembly::I64_TRUNC_S_F64);
887 case WebAssembly::FP_TO_UINT_I64_F64:
889 WebAssembly::I64_TRUNC_U_F64);
890 case WebAssembly::MEMCPY_A32:
892 case WebAssembly::MEMCPY_A64:
894 case WebAssembly::MEMSET_A32:
896 case WebAssembly::MEMSET_A64:
898 case WebAssembly::CALL_RESULTS:
899 case WebAssembly::RET_CALL_RESULTS:
905WebAssemblyTargetLowering::getTargetNodeName(
unsigned Opcode)
const {
909#define HANDLE_NODETYPE(NODE) \
910 case WebAssemblyISD::NODE: \
911 return "WebAssemblyISD::" #NODE;
912#include "WebAssemblyISD.def"
913#undef HANDLE_NODETYPE
918std::pair<unsigned, const TargetRegisterClass *>
919WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
923 if (Constraint.
size() == 1) {
924 switch (Constraint[0]) {
926 assert(VT != MVT::iPTR &&
"Pointer MVT not expected here");
929 return std::make_pair(0U, &WebAssembly::V128RegClass);
933 return std::make_pair(0U, &WebAssembly::I32RegClass);
935 return std::make_pair(0U, &WebAssembly::I64RegClass);
940 return std::make_pair(0U, &WebAssembly::F32RegClass);
942 return std::make_pair(0U, &WebAssembly::F64RegClass);
956bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(
Type *Ty)
const {
961bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(
Type *Ty)
const {
966bool WebAssemblyTargetLowering::isLegalAddressingMode(
const DataLayout &
DL,
968 Type *Ty,
unsigned AS,
984bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
998bool WebAssemblyTargetLowering::isIntDivCheap(
EVT VT,
1005bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(
SDValue ExtVal)
const {
1007 EVT MemT = cast<LoadSDNode>(ExtVal->
getOperand(0))->getValueType(0);
1008 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
1009 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
1010 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
1013bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
1020EVT WebAssemblyTargetLowering::getSetCCResultType(
const DataLayout &
DL,
1033bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1036 unsigned Intrinsic)
const {
1037 switch (Intrinsic) {
1038 case Intrinsic::wasm_memory_atomic_notify:
1040 Info.memVT = MVT::i32;
1041 Info.ptrVal =
I.getArgOperand(0);
1052 case Intrinsic::wasm_memory_atomic_wait32:
1054 Info.memVT = MVT::i32;
1055 Info.ptrVal =
I.getArgOperand(0);
1060 case Intrinsic::wasm_memory_atomic_wait64:
1062 Info.memVT = MVT::i64;
1063 Info.ptrVal =
I.getArgOperand(0);
1068 case Intrinsic::wasm_loadf16_f32:
1070 Info.memVT = MVT::f16;
1071 Info.ptrVal =
I.getArgOperand(0);
1076 case Intrinsic::wasm_storef16_f32:
1078 Info.memVT = MVT::f16;
1079 Info.ptrVal =
I.getArgOperand(1);
1089void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
1092 switch (
Op.getOpcode()) {
1096 unsigned IntNo =
Op.getConstantOperandVal(0);
1100 case Intrinsic::wasm_bitmask: {
1102 EVT VT =
Op.getOperand(1).getSimpleValueType();
1105 Known.
Zero |= ZeroMask;
1114WebAssemblyTargetLowering::getPreferredVectorAction(
MVT VT)
const {
1120 if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
1121 EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
1128bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
1129 SDValue Op,
const TargetLoweringOpt &TLO)
const {
1182WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
1194 "WebAssembly doesn't support language-specific or target-specific "
1195 "calling conventions yet");
1196 if (CLI.IsPatchPoint)
1197 fail(
DL, DAG,
"WebAssembly doesn't support patch point yet");
1199 if (CLI.IsTailCall) {
1200 auto NoTail = [&](
const char *Msg) {
1201 if (CLI.CB && CLI.CB->isMustTailCall())
1203 CLI.IsTailCall =
false;
1207 NoTail(
"WebAssembly 'tail-call' feature not enabled");
1211 NoTail(
"WebAssembly does not support varargs tail calls");
1221 bool TypesMatch = CallerRetTys.
size() == CalleeRetTys.
size() &&
1222 std::equal(CallerRetTys.
begin(), CallerRetTys.
end(),
1223 CalleeRetTys.
begin());
1225 NoTail(
"WebAssembly tail call requires caller and callee return types to "
1230 for (
auto &Arg : CLI.CB->args()) {
1231 Value *Val = Arg.get();
1235 if (
auto *
GEP = dyn_cast<GetElementPtrInst>(Src))
1236 Src =
GEP->getPointerOperand();
1241 if (isa<AllocaInst>(Val)) {
1243 "WebAssembly does not support tail calling with stack arguments");
1258 Outs[0].Flags.isSRet()) {
1263 bool HasSwiftSelfArg =
false;
1264 bool HasSwiftErrorArg =
false;
1266 for (
unsigned I = 0;
I < Outs.
size(); ++
I) {
1272 fail(
DL, DAG,
"WebAssembly hasn't implemented nest arguments");
1274 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca arguments");
1276 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs arguments");
1278 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last arguments");
1287 Chain = DAG.
getMemcpy(Chain,
DL, FINode, OutVal, SizeNode,
1298 bool IsVarArg = CLI.IsVarArg;
1306 if (!HasSwiftSelfArg) {
1310 CLI.Outs.push_back(Arg);
1312 CLI.OutVals.push_back(ArgVal);
1314 if (!HasSwiftErrorArg) {
1318 CLI.Outs.push_back(Arg);
1320 CLI.OutVals.push_back(ArgVal);
1335 assert(VT != MVT::iPTR &&
"Legalized args should be concrete");
1340 CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
1347 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
1350 if (IsVarArg && NumBytes) {
1353 MaybeAlign StackAlign = Layout.getStackAlignment();
1354 assert(StackAlign &&
"data layout string is missing stack alignment");
1360 assert(ArgLocs[ValNo].getValNo() == ValNo &&
1361 "ArgLocs should remain in order and only hold varargs args");
1362 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
1370 if (!Chains.
empty())
1372 }
else if (IsVarArg) {
1402 for (
const auto &In : Ins) {
1403 assert(!
In.Flags.isByVal() &&
"byval is not valid for return values");
1404 assert(!
In.Flags.isNest() &&
"nest is not valid for return values");
1405 if (
In.Flags.isInAlloca())
1406 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca return values");
1407 if (
In.Flags.isInConsecutiveRegs())
1408 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs return values");
1409 if (
In.Flags.isInConsecutiveRegsLast())
1411 "WebAssembly hasn't implemented cons regs last return values");
1420 CLI.CB->getCalledOperand()->getType())) {
1435 WebAssemblyISD::TABLE_SET,
DL, DAG.
getVTList(MVT::Other), TableSetOps,
1440 CLI.CB->getCalledOperand()->getPointerAlignment(DAG.
getDataLayout()),
1446 if (CLI.IsTailCall) {
1449 return DAG.
getNode(WebAssemblyISD::RET_CALL,
DL, NodeTys, Ops);
1456 for (
size_t I = 0;
I <
Ins.size(); ++
I)
1463bool WebAssemblyTargetLowering::CanLowerReturn(
1472SDValue WebAssemblyTargetLowering::LowerReturn(
1478 "MVP WebAssembly can only return up to one value");
1480 fail(
DL, DAG,
"WebAssembly doesn't support non-C calling conventions");
1483 RetOps.append(OutVals.
begin(), OutVals.
end());
1484 Chain = DAG.
getNode(WebAssemblyISD::RETURN,
DL, MVT::Other, RetOps);
1490 assert(Out.
IsFixed &&
"non-fixed return value is not valid");
1492 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca results");
1494 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs results");
1496 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last results");
1502SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1507 fail(
DL, DAG,
"WebAssembly doesn't support non-C calling conventions");
1516 bool HasSwiftErrorArg =
false;
1517 bool HasSwiftSelfArg =
false;
1519 HasSwiftSelfArg |=
In.Flags.isSwiftSelf();
1520 HasSwiftErrorArg |=
In.Flags.isSwiftError();
1521 if (
In.Flags.isInAlloca())
1522 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca arguments");
1523 if (
In.Flags.isNest())
1524 fail(
DL, DAG,
"WebAssembly hasn't implemented nest arguments");
1525 if (
In.Flags.isInConsecutiveRegs())
1526 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs arguments");
1527 if (
In.Flags.isInConsecutiveRegsLast())
1528 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last arguments");
1537 MFI->addParam(
In.VT);
1546 if (!HasSwiftSelfArg) {
1547 MFI->addParam(PtrVT);
1549 if (!HasSwiftErrorArg) {
1550 MFI->addParam(PtrVT);
1559 MFI->setVarargBufferVreg(VarargVreg);
1561 Chain,
DL, VarargVreg,
1562 DAG.
getNode(WebAssemblyISD::ARGUMENT,
DL, PtrVT,
1564 MFI->addParam(PtrVT);
1576 assert(MFI->getParams().size() == Params.
size() &&
1577 std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1583void WebAssemblyTargetLowering::ReplaceNodeResults(
1585 switch (
N->getOpcode()) {
1599 Results.push_back(Replace128Op(
N, DAG));
1603 "ReplaceNodeResults not implemented for this op for WebAssembly!");
1614 switch (
Op.getOpcode()) {
1619 return LowerFrameIndex(
Op, DAG);
1621 return LowerGlobalAddress(
Op, DAG);
1623 return LowerGlobalTLSAddress(
Op, DAG);
1625 return LowerExternalSymbol(
Op, DAG);
1627 return LowerJumpTable(
Op, DAG);
1629 return LowerBR_JT(
Op, DAG);
1631 return LowerVASTART(
Op, DAG);
1634 fail(
DL, DAG,
"WebAssembly hasn't implemented computed gotos");
1637 return LowerRETURNADDR(
Op, DAG);
1639 return LowerFRAMEADDR(
Op, DAG);
1641 return LowerCopyToReg(
Op, DAG);
1644 return LowerAccessVectorElement(
Op, DAG);
1648 return LowerIntrinsic(
Op, DAG);
1650 return LowerSIGN_EXTEND_INREG(
Op, DAG);
1653 return LowerEXTEND_VECTOR_INREG(
Op, DAG);
1655 return LowerBUILD_VECTOR(
Op, DAG);
1657 return LowerVECTOR_SHUFFLE(
Op, DAG);
1659 return LowerSETCC(
Op, DAG);
1663 return LowerShift(
Op, DAG);
1666 return LowerFP_TO_INT_SAT(
Op, DAG);
1668 return LowerLoad(
Op, DAG);
1670 return LowerStore(
Op, DAG);
1679 return LowerMUL_LOHI(
Op, DAG);
1694 return std::nullopt;
1727 return DAG.
getNode(WebAssemblyISD::LOCAL_SET,
DL, Tys, Ops);
1732 "Encountered an unlowerable store to the wasm_var address space",
1748 "unexpected offset when loading from webassembly global",
false);
1759 "unexpected offset when loading from webassembly local",
false);
1766 assert(
Result->getNumValues() == 2 &&
"Loads must carry a chain!");
1772 "Encountered an unlowerable load from the wasm_var address space",
1781 assert(
Op.getValueType() == MVT::i64);
1784 switch (
Op.getOpcode()) {
1786 Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
1789 Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
1806 assert(
N->getValueType(0) == MVT::i128);
1809 switch (
N->getOpcode()) {
1811 Opcode = WebAssemblyISD::I64_ADD128;
1814 Opcode = WebAssemblyISD::I64_SUB128;
1829 LHS_0, LHS_1, RHS_0, RHS_1);
1837 if (isa<FrameIndexSDNode>(Src.getNode())) {
1845 Register Reg = cast<RegisterSDNode>(
Op.getOperand(1))->getReg();
1846 EVT VT = Src.getValueType();
1848 : WebAssembly::COPY_I64,
1851 return Op.getNode()->getNumValues() == 1
1862 int FI = cast<FrameIndexSDNode>(
Op)->getIndex();
1872 "Non-Emscripten WebAssembly hasn't implemented "
1873 "__builtin_return_address");
1880 unsigned Depth =
Op.getConstantOperandVal(0);
1881 MakeLibCallOptions CallOptions;
1882 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS,
Op.getValueType(),
1883 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions,
DL)
1892 if (
Op.getConstantOperandVal(0) > 0)
1896 EVT VT =
Op.getValueType();
1903WebAssemblyTargetLowering::LowerGlobalTLSAddress(
SDValue Op,
1906 const auto *GA = cast<GlobalAddressSDNode>(
Op);
1933 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1934 : WebAssembly::GLOBAL_GET_I32;
1945 DAG.
getNode(WebAssemblyISD::WrapperREL,
DL, PtrVT, TLSOffset);
1952 EVT VT =
Op.getValueType();
1953 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
1962 const auto *GA = cast<GlobalAddressSDNode>(
Op);
1963 EVT VT =
Op.getValueType();
1965 "Unexpected target flags on generic GlobalAddressSDNode");
1967 fail(
DL, DAG,
"Invalid address space for WebAssembly target");
1978 const char *BaseName;
1987 DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
1991 WebAssemblyISD::WrapperREL,
DL, VT,
2000 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2006WebAssemblyTargetLowering::LowerExternalSymbol(
SDValue Op,
2009 const auto *ES = cast<ExternalSymbolSDNode>(
Op);
2010 EVT VT =
Op.getValueType();
2011 assert(ES->getTargetFlags() == 0 &&
2012 "Unexpected target flags on generic ExternalSymbolSDNode");
2013 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2024 JT->getTargetFlags());
2031 const auto *
JT = cast<JumpTableSDNode>(
Op.getOperand(1));
2033 assert(
JT->getTargetFlags() == 0 &&
"WebAssembly doesn't set target flags");
2043 for (
auto *
MBB : MBBs)
2050 return DAG.
getNode(WebAssemblyISD::BR_TABLE,
DL, MVT::Other, Ops);
2059 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
2062 MFI->getVarargBufferVreg(), PtrVT);
2063 return DAG.
getStore(
Op.getOperand(0),
DL, ArgN,
Op.getOperand(1),
2071 if (
N->getConstantOperandVal(0) !=
2072 Intrinsic::experimental_vector_partial_reduce_add)
2075 assert(
N->getValueType(0) == MVT::v4i32 &&
"can only support v4i32");
2084 "expected widening mul");
2086 "expected mul to use the same extend for both operands");
2096 ExtendInLHS, ExtendInRHS);
2100 unsigned LowOpc = WebAssemblyISD::EXTEND_LOW_U;
2101 unsigned HighOpc = WebAssemblyISD::EXTEND_HIGH_U;
2115 "expected v16i8 input types");
2119 unsigned LowOpc = WebAssemblyISD::EXTEND_LOW_S;
2120 unsigned HighOpc = WebAssemblyISD::EXTEND_HIGH_S;
2126 DAG.
getNode(WebAssemblyISD::DOT,
DL, MVT::v4i32, LowLHS, LowRHS);
2128 DAG.
getNode(WebAssemblyISD::DOT,
DL, MVT::v4i32, HighLHS, HighRHS);
2133 unsigned LowOpc = WebAssemblyISD::EXTEND_LOW_U;
2134 unsigned HighOpc = WebAssemblyISD::EXTEND_HIGH_U;
2159 switch (
Op.getOpcode()) {
2162 IntNo =
Op.getConstantOperandVal(1);
2165 IntNo =
Op.getConstantOperandVal(0);
2176 case Intrinsic::wasm_lsda: {
2185 DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
2192 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
Node);
2195 case Intrinsic::wasm_shuffle: {
2199 Ops[OpIdx++] =
Op.getOperand(1);
2200 Ops[OpIdx++] =
Op.getOperand(2);
2201 while (OpIdx < 18) {
2202 const SDValue &MaskIdx =
Op.getOperand(OpIdx + 1);
2207 Ops[OpIdx++] = MaskIdx;
2210 return DAG.
getNode(WebAssemblyISD::SHUFFLE,
DL,
Op.getValueType(), Ops);
2213 case Intrinsic::thread_pointer: {
2215 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2216 : WebAssembly::GLOBAL_GET_I32;
2227WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(
SDValue Op,
2241 const SDValue &Extract =
Op.getOperand(0);
2245 MVT ExtractedLaneT =
2246 cast<VTSDNode>(
Op.getOperand(1).getNode())->
getVT().getSimpleVT();
2249 if (ExtractedVecT == VecT)
2254 if (!isa<ConstantSDNode>(Index))
2256 unsigned IndexVal =
Index->getAsZExtVal();
2270WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(
SDValue Op,
2273 EVT VT =
Op.getValueType();
2275 EVT SrcVT = Src.getValueType();
2282 "Unexpected extension factor.");
2285 if (Scale != 2 && Scale != 4 && Scale != 8)
2289 switch (
Op.getOpcode()) {
2291 Ext = WebAssemblyISD::EXTEND_LOW_U;
2294 Ext = WebAssemblyISD::EXTEND_LOW_S;
2299 while (Scale != 1) {
2302 .widenIntegerVectorElementType(*DAG.
getContext())
2303 .getHalfNumVectorElementsVT(*DAG.
getContext()),
2313 if (
Op.getValueType() != MVT::v2f64)
2317 unsigned &Index) ->
bool {
2318 switch (
Op.getOpcode()) {
2320 Opcode = WebAssemblyISD::CONVERT_LOW_S;
2323 Opcode = WebAssemblyISD::CONVERT_LOW_U;
2326 Opcode = WebAssemblyISD::PROMOTE_LOW;
2332 auto ExtractVector =
Op.getOperand(0);
2336 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
2339 SrcVec = ExtractVector.getOperand(0);
2340 Index = ExtractVector.getConstantOperandVal(1);
2344 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
2346 if (!GetConvertedLane(
Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
2347 !GetConvertedLane(
Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
2350 if (LHSOpcode != RHSOpcode)
2354 switch (LHSOpcode) {
2355 case WebAssemblyISD::CONVERT_LOW_S:
2356 case WebAssemblyISD::CONVERT_LOW_U:
2357 ExpectedSrcVT = MVT::v4i32;
2359 case WebAssemblyISD::PROMOTE_LOW:
2360 ExpectedSrcVT = MVT::v4f32;
2366 auto Src = LHSSrcVec;
2367 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2370 ExpectedSrcVT,
DL, LHSSrcVec, RHSSrcVec,
2371 {
static_cast<int>(LHSIndex),
static_cast<int>(RHSIndex) + 4, -1, -1});
2373 return DAG.
getNode(LHSOpcode,
DL, MVT::v2f64, Src);
2378 MVT VT =
Op.getSimpleValueType();
2379 if (VT == MVT::v8f16) {
2394 const EVT VecT =
Op.getValueType();
2395 const EVT LaneT =
Op.getOperand(0).getValueType();
2397 bool CanSwizzle = VecT == MVT::v16i8;
2418 auto GetSwizzleSrcs = [](
size_t I,
const SDValue &Lane) {
2433 Index->getConstantOperandVal(1) !=
I)
2435 return std::make_pair(SwizzleSrc, SwizzleIndices);
2442 auto GetShuffleSrc = [&](
const SDValue &Lane) {
2445 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2447 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2453 using ValueEntry = std::pair<SDValue, size_t>;
2456 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>,
size_t>;
2459 using ShuffleEntry = std::pair<SDValue, size_t>;
2462 auto AddCount = [](
auto &Counts,
const auto &Val) {
2464 llvm::find_if(Counts, [&Val](
auto E) {
return E.first == Val; });
2465 if (CountIt == Counts.end()) {
2466 Counts.emplace_back(Val, 1);
2472 auto GetMostCommon = [](
auto &Counts) {
2475 assert(CommonIt != Counts.end() &&
"Unexpected all-undef build_vector");
2479 size_t NumConstantLanes = 0;
2482 for (
size_t I = 0;
I < Lanes; ++
I) {
2487 AddCount(SplatValueCounts, Lane);
2491 if (
auto ShuffleSrc = GetShuffleSrc(Lane))
2492 AddCount(ShuffleCounts, ShuffleSrc);
2494 auto SwizzleSrcs = GetSwizzleSrcs(
I, Lane);
2495 if (SwizzleSrcs.first)
2496 AddCount(SwizzleCounts, SwizzleSrcs);
2501 size_t NumSplatLanes;
2502 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2506 size_t NumSwizzleLanes = 0;
2507 if (SwizzleCounts.
size())
2508 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2509 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2513 SDValue ShuffleSrc1, ShuffleSrc2;
2514 size_t NumShuffleLanes = 0;
2515 if (ShuffleCounts.
size()) {
2516 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2518 [&](
const auto &Pair) {
return Pair.first == ShuffleSrc1; });
2520 if (ShuffleCounts.
size()) {
2521 size_t AdditionalShuffleLanes;
2522 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2523 GetMostCommon(ShuffleCounts);
2524 NumShuffleLanes += AdditionalShuffleLanes;
2529 std::function<
bool(
size_t,
const SDValue &)> IsLaneConstructed;
2532 if (NumSwizzleLanes >= NumShuffleLanes &&
2533 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2536 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2537 IsLaneConstructed = [&, Swizzled](
size_t I,
const SDValue &Lane) {
2538 return Swizzled == GetSwizzleSrcs(
I, Lane);
2540 }
else if (NumShuffleLanes >= NumConstantLanes &&
2541 NumShuffleLanes >= NumSplatLanes) {
2551 assert(LaneSize > DestLaneSize);
2552 Scale1 = LaneSize / DestLaneSize;
2558 assert(LaneSize > DestLaneSize);
2559 Scale2 = LaneSize / DestLaneSize;
2564 assert(DestLaneCount <= 16);
2565 for (
size_t I = 0;
I < DestLaneCount; ++
I) {
2567 SDValue Src = GetShuffleSrc(Lane);
2568 if (Src == ShuffleSrc1) {
2570 }
else if (Src && Src == ShuffleSrc2) {
2578 IsLaneConstructed = [&](size_t,
const SDValue &Lane) {
2579 auto Src = GetShuffleSrc(Lane);
2580 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2582 }
else if (NumConstantLanes >= NumSplatLanes) {
2584 for (
const SDValue &Lane :
Op->op_values()) {
2592 auto *
Const = dyn_cast<ConstantSDNode>(Lane.
getNode());
2593 int64_t Val =
Const ?
Const->getSExtValue() : 0;
2595 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
2596 "Unexpected out of bounds negative value");
2597 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2599 auto NewVal = (((
uint64_t)Val & Mask) - (1ll << LaneBits)) &
Mask;
2616 if (NumSplatLanes == 1 &&
Op->getOperand(0) == SplatValue &&
2617 (DestLaneSize == 32 || DestLaneSize == 64)) {
2624 IsLaneConstructed = [&SplatValue](
size_t _,
const SDValue &Lane) {
2625 return Lane == SplatValue;
2630 assert(IsLaneConstructed);
2633 for (
size_t I = 0;
I < Lanes; ++
I) {
2635 if (!Lane.
isUndef() && !IsLaneConstructed(
I, Lane))
2644WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(
SDValue Op,
2649 assert(
VecType.is128BitVector() &&
"Unexpected shuffle vector type");
2650 size_t LaneBytes =
VecType.getVectorElementType().getSizeInBits() / 8;
2655 Ops[OpIdx++] =
Op.getOperand(0);
2656 Ops[OpIdx++] =
Op.getOperand(1);
2659 for (
int M : Mask) {
2660 for (
size_t J = 0; J < LaneBytes; ++J) {
2669 return DAG.
getNode(WebAssemblyISD::SHUFFLE,
DL,
Op.getValueType(), Ops);
2677 assert(
Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2682 auto MakeLane = [&](
unsigned I) {
2688 {MakeLane(0), MakeLane(1)});
2692WebAssemblyTargetLowering::LowerAccessVectorElement(
SDValue Op,
2696 if (isa<ConstantSDNode>(IdxNode)) {
2709 EVT LaneT =
Op.getSimpleValueType().getVectorElementType();
2711 if (LaneT.
bitsGE(MVT::i32))
2715 size_t NumLanes =
Op.getSimpleValueType().getVectorNumElements();
2717 unsigned ShiftOpcode =
Op.getOpcode();
2723 for (
size_t i = 0; i < NumLanes; ++i) {
2726 SDValue ShiftedValue = ShiftedElements[i];
2731 DAG.
getNode(ShiftOpcode,
DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2741 assert(
Op.getSimpleValueType().isVector());
2743 uint64_t LaneBits =
Op.getValueType().getScalarSizeInBits();
2744 auto ShiftVal =
Op.getOperand(1);
2758 MaskVal == MaskBits)
2761 if (!isa<ConstantSDNode>(
RHS.getNode()))
2764 auto ConstantRHS = dyn_cast<ConstantSDNode>(
RHS.getNode());
2765 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2773 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2779 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2784 switch (
Op.getOpcode()) {
2786 Opcode = WebAssemblyISD::VEC_SHL;
2789 Opcode = WebAssemblyISD::VEC_SHR_S;
2792 Opcode = WebAssemblyISD::VEC_SHR_U;
2798 return DAG.
getNode(Opcode,
DL,
Op.getValueType(),
Op.getOperand(0), ShiftVal);
2804 EVT ResT =
Op.getValueType();
2805 EVT SatVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2807 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2808 (SatVT == MVT::i32 || SatVT == MVT::i64))
2811 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2814 if (ResT == MVT::v8i16 && SatVT == MVT::i16)
2825 auto &DAG = DCI.
DAG;
2826 auto Shuffle = cast<ShuffleVectorSDNode>(
N);
2832 SDValue Bitcast =
N->getOperand(0);
2835 if (!
N->getOperand(1).isUndef())
2837 SDValue CastOp = Bitcast.getOperand(0);
2839 EVT DstType = Bitcast.getValueType();
2844 SrcType,
SDLoc(
N), CastOp, DAG.
getUNDEF(SrcType), Shuffle->getMask());
2854 auto &DAG = DCI.
DAG;
2858 EVT InVT =
N->getOperand(0)->getValueType(0);
2859 EVT ResVT =
N->getValueType(0);
2861 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
2863 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
2876 auto &DAG = DCI.
DAG;
2882 auto Extract =
N->getOperand(0);
2886 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.
getOperand(1));
2887 if (IndexNode ==
nullptr)
2889 auto Index = IndexNode->getZExtValue();
2893 EVT ResVT =
N->getValueType(0);
2894 if (ResVT == MVT::v8i16) {
2896 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2898 }
else if (ResVT == MVT::v4i32) {
2900 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2902 }
else if (ResVT == MVT::v2i64) {
2904 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2911 bool IsLow = Index == 0;
2913 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2914 : WebAssemblyISD::EXTEND_HIGH_S)
2915 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2916 : WebAssemblyISD::EXTEND_HIGH_U);
2923 auto &DAG = DCI.
DAG;
2925 auto GetWasmConversionOp = [](
unsigned Op) {
2928 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2930 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2932 return WebAssemblyISD::DEMOTE_ZERO;
2937 auto IsZeroSplat = [](
SDValue SplatVal) {
2938 auto *
Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2939 APInt SplatValue, SplatUndef;
2940 unsigned SplatBitSize;
2945 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2963 EVT ExpectedConversionType;
2966 switch (ConversionOp) {
2970 ExpectedConversionType = MVT::v2i32;
2974 ExpectedConversionType = MVT::v2f32;
2980 if (
N->getValueType(0) != ResVT)
2983 if (
Conversion.getValueType() != ExpectedConversionType)
2987 if (Source.getValueType() != MVT::v2f64)
2990 if (!IsZeroSplat(
N->getOperand(1)) ||
2991 N->getOperand(1).getValueType() != ExpectedConversionType)
2994 unsigned Op = GetWasmConversionOp(ConversionOp);
3010 auto ConversionOp =
N->getOpcode();
3011 switch (ConversionOp) {
3023 if (
N->getValueType(0) != ResVT)
3026 auto Concat =
N->getOperand(0);
3027 if (
Concat.getValueType() != MVT::v4f64)
3030 auto Source =
Concat.getOperand(0);
3031 if (Source.getValueType() != MVT::v2f64)
3034 if (!IsZeroSplat(
Concat.getOperand(1)) ||
3035 Concat.getOperand(1).getValueType() != MVT::v2f64)
3038 unsigned Op = GetWasmConversionOp(ConversionOp);
3044 const SDLoc &
DL,
unsigned VectorWidth) {
3052 unsigned ElemsPerChunk = VectorWidth / ElVT.
getSizeInBits();
3057 IdxVal &= ~(ElemsPerChunk - 1);
3062 Vec->
ops().slice(IdxVal, ElemsPerChunk));
3074 EVT SrcVT = In.getValueType();
3092 EVT InVT = MVT::i16, OutVT = MVT::i8;
3097 unsigned SubSizeInBits = SrcSizeInBits / 2;
3099 OutVT =
EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
3125 auto &DAG = DCI.
DAG;
3128 EVT InVT = In.getValueType();
3132 EVT OutVT =
N->getValueType(0);
3139 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
3140 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.
is128BitVector()))
3152 auto &DAG = DCI.
DAG;
3155 EVT VT =
N->getValueType(0);
3156 EVT SrcVT = Src.getValueType();
3163 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3168 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
3169 DAG.getSExtOrTrunc(N->getOperand(0), DL,
3170 SrcVT.changeVectorElementType(Width))}),
3179 auto &DAG = DCI.
DAG;
3185 EVT VT =
N->getValueType(0);
3199 EVT FromVT =
LHS->getOperand(0).getValueType();
3203 : Intrinsic::wasm_alltrue;
3205 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3211 {DAG.getConstant(Intrin, DL, MVT::i32),
3212 DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
3213 FromVT.changeVectorElementType(Width))}),
3217 Ret = DAG.
getNOT(
DL, Ret, MVT::i1);
3227WebAssemblyTargetLowering::PerformDAGCombine(
SDNode *
N,
3228 DAGCombinerInfo &DCI)
const {
3229 switch (
N->getOpcode()) {
unsigned const MachineRegisterInfo * MRI
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
static unsigned NumFixedArgs
unsigned const TargetRegisterInfo * TRI
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static bool callingConvSupported(CallingConv::ID CallConv)
static MachineBasicBlock * LowerMemcpy(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static MachineBasicBlock * LowerMemset(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instr...
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
SDValue performLowerPartialReduction(SDNode *N, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
static constexpr int Concat[]
Class for arbitrary precision integers.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
LLVM Basic Block Representation.
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
This class represents a function call, abstracting a target machine's calling convention.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
Diagnostic information for unsupported feature in backend.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
int64_t getOffset() const
unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Type * getValueType() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
@ INVALID_SIMPLE_VALUE_TYPE
static auto integer_fixedlen_vector_valuetypes()
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
iterator_range< mop_iterator > uses()
Returns a range that includes all operands which may be register uses.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
iterator_range< mop_iterator > defs()
Returns a range over all explicit operands that are register definitions.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
The instances of the Type class are immutable: once they are created, they are never changed.
static Type * getDoubleTy(LLVMContext &C)
bool isFunctionTy() const
True if this is an instance of FunctionType.
static Type * getFloatTy(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
Register getFrameRegister(const MachineFunction &MF) const override
bool hasCallIndirectOverlong() const
const Triple & getTargetTriple() const
const WebAssemblyInstrInfo * getInstrInfo() const override
bool hasBulkMemory() const
const WebAssemblyRegisterInfo * getRegisterInfo() const override
bool hasWideArithmetic() const
bool hasReferenceTypes() const
bool hasExceptionHandling() const
bool hasNontrappingFPToInt() const
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Swift
Calling convention for Swift.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ CXX_FAST_TLS
Used for access functions.
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isExtOpcode(unsigned Opcode)
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
ID ArrayRef< Type * > Tys
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
@ WASM_ADDRESS_SPACE_EXTERNREF
@ WASM_ADDRESS_SPACE_FUNCREF
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the type represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
bool isValidAddressSpace(unsigned AS)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a pointer parameter that points to the value in memory.
bool isWasmVarAddressSpace(unsigned AS)
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInConsecutiveRegs() const
Align getNonZeroOrigAlign() const
bool isSwiftError() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
Align getNonZeroByValAlign() const
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
bool IsFixed
IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
unsigned getBitWidth() const
Get the bit width of this value.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
bool isBeforeLegalize() const
Function object to check whether the second component of a container supported by std::get (like std:...