35#include "llvm/IR/IntrinsicsWebAssembly.h"
42#define DEBUG_TYPE "wasm-lower"
47 auto MVTPtr = Subtarget->
hasAddr64() ? MVT::i64 : MVT::i32;
61 Subtarget->
hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
90 for (
auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
95 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
108 for (
auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
129 for (
auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64, MVT::v8f16}) {
130 if (!Subtarget->
hasFP16() &&
T == MVT::v8f16) {
153 if (
T != MVT::v8f16) {
166 for (
auto T : {MVT::i32, MVT::i64})
169 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
183 for (
auto T : {MVT::i32, MVT::i64})
221 for (
auto T : {MVT::v16i8, MVT::v8i16})
225 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
229 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
237 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
245 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
253 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
258 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
266 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
273 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
278 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
288 for (
auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
294 for (
auto T : {MVT::v4f32, MVT::v2f64})
304 for (
auto T : {MVT::v2i64, MVT::v2f64})
328 for (
auto T : {MVT::i8, MVT::i16, MVT::i32})
344 for (
auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
362 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
365 if (
MVT(
T) != MemT) {
404 return MVT::externref;
413 return MVT::externref;
419bool WebAssemblyTargetLowering::shouldExpandPartialReductionIntrinsic(
421 if (
I->getIntrinsicID() != Intrinsic::experimental_vector_partial_reduce_add)
425 auto Op1 =
I->getOperand(1);
427 if (
auto *InputInst = dyn_cast<Instruction>(Op1)) {
431 if (isa<Instruction>(InputInst->getOperand(0)) &&
432 isa<Instruction>(InputInst->getOperand(1))) {
434 if (cast<Instruction>(InputInst->getOperand(0))->getOpcode() !=
435 cast<Instruction>(InputInst->getOperand(1))->getOpcode())
449WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(
AtomicRMWInst *AI)
const {
465bool WebAssemblyTargetLowering::shouldScalarizeBinop(
SDValue VecOp)
const {
485FastISel *WebAssemblyTargetLowering::createFastISel(
490MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(
const DataLayout & ,
501 "32-bit shift counts ought to be enough for anyone");
506 "Unable to represent scalar shift amount type");
516 bool IsUnsigned,
bool Int64,
517 bool Float64,
unsigned LoweredOpcode) {
523 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
524 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
525 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
526 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
527 unsigned IConst =
Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
528 unsigned Eqz = WebAssembly::EQZ_I32;
529 unsigned And = WebAssembly::AND_I32;
531 int64_t Substitute = IsUnsigned ? 0 : Limit;
532 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(
double)Limit;
543 F->insert(It, FalseMBB);
544 F->insert(It, TrueMBB);
545 F->insert(It, DoneMBB);
548 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
556 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
557 Tmp0 =
MRI.createVirtualRegister(
MRI.getRegClass(InReg));
558 Tmp1 =
MRI.createVirtualRegister(
MRI.getRegClass(InReg));
559 CmpReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
560 EqzReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
561 FalseReg =
MRI.createVirtualRegister(
MRI.getRegClass(OutReg));
562 TrueReg =
MRI.createVirtualRegister(
MRI.getRegClass(OutReg));
564 MI.eraseFromParent();
573 .
addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
578 Tmp1 =
MRI.createVirtualRegister(
MRI.getRegClass(InReg));
580 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
581 Register AndReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
583 .
addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
625 unsigned Eqz =
Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
626 unsigned MemoryCopy =
627 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
638 F->insert(It, TrueMBB);
639 F->insert(It, DoneMBB);
642 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
652 EqzReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
655 MI.eraseFromParent();
693 unsigned Eqz =
Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
694 unsigned MemoryFill =
695 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
706 F->insert(It, TrueMBB);
707 F->insert(It, DoneMBB);
710 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
720 EqzReg =
MRI.createVirtualRegister(&WebAssembly::I32RegClass);
723 MI.eraseFromParent();
745 CallResults.
getOpcode() == WebAssembly::RET_CALL_RESULTS);
749 bool IsRetCall = CallResults.
getOpcode() == WebAssembly::RET_CALL_RESULTS;
751 bool IsFuncrefCall =
false;
757 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
762 if (IsIndirect && IsRetCall) {
763 CallOp = WebAssembly::RET_CALL_INDIRECT;
764 }
else if (IsIndirect) {
765 CallOp = WebAssembly::CALL_INDIRECT;
766 }
else if (IsRetCall) {
767 CallOp = WebAssembly::RET_CALL;
769 CallOp = WebAssembly::CALL;
798 for (
auto Def : CallResults.
defs())
822 for (
auto Use : CallParams.
uses())
838 if (IsIndirect && IsFuncrefCall) {
850 BuildMI(MF,
DL,
TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
854 BuildMI(MF,
DL,
TII.get(WebAssembly::TABLE_SET_FUNCREF))
869 switch (
MI.getOpcode()) {
872 case WebAssembly::FP_TO_SINT_I32_F32:
874 WebAssembly::I32_TRUNC_S_F32);
875 case WebAssembly::FP_TO_UINT_I32_F32:
877 WebAssembly::I32_TRUNC_U_F32);
878 case WebAssembly::FP_TO_SINT_I64_F32:
880 WebAssembly::I64_TRUNC_S_F32);
881 case WebAssembly::FP_TO_UINT_I64_F32:
883 WebAssembly::I64_TRUNC_U_F32);
884 case WebAssembly::FP_TO_SINT_I32_F64:
886 WebAssembly::I32_TRUNC_S_F64);
887 case WebAssembly::FP_TO_UINT_I32_F64:
889 WebAssembly::I32_TRUNC_U_F64);
890 case WebAssembly::FP_TO_SINT_I64_F64:
892 WebAssembly::I64_TRUNC_S_F64);
893 case WebAssembly::FP_TO_UINT_I64_F64:
895 WebAssembly::I64_TRUNC_U_F64);
896 case WebAssembly::MEMCPY_A32:
898 case WebAssembly::MEMCPY_A64:
900 case WebAssembly::MEMSET_A32:
902 case WebAssembly::MEMSET_A64:
904 case WebAssembly::CALL_RESULTS:
905 case WebAssembly::RET_CALL_RESULTS:
911WebAssemblyTargetLowering::getTargetNodeName(
unsigned Opcode)
const {
915#define HANDLE_NODETYPE(NODE) \
916 case WebAssemblyISD::NODE: \
917 return "WebAssemblyISD::" #NODE;
918#include "WebAssemblyISD.def"
919#undef HANDLE_NODETYPE
924std::pair<unsigned, const TargetRegisterClass *>
925WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
929 if (Constraint.
size() == 1) {
930 switch (Constraint[0]) {
932 assert(VT != MVT::iPTR &&
"Pointer MVT not expected here");
935 return std::make_pair(0U, &WebAssembly::V128RegClass);
939 return std::make_pair(0U, &WebAssembly::I32RegClass);
941 return std::make_pair(0U, &WebAssembly::I64RegClass);
946 return std::make_pair(0U, &WebAssembly::F32RegClass);
948 return std::make_pair(0U, &WebAssembly::F64RegClass);
962bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(
Type *Ty)
const {
967bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(
Type *Ty)
const {
972bool WebAssemblyTargetLowering::isLegalAddressingMode(
const DataLayout &
DL,
974 Type *Ty,
unsigned AS,
990bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
1004bool WebAssemblyTargetLowering::isIntDivCheap(
EVT VT,
1011bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(
SDValue ExtVal)
const {
1013 EVT MemT = cast<LoadSDNode>(ExtVal->
getOperand(0))->getValueType(0);
1014 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
1015 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
1016 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
1019bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
1026EVT WebAssemblyTargetLowering::getSetCCResultType(
const DataLayout &
DL,
1039bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1042 unsigned Intrinsic)
const {
1043 switch (Intrinsic) {
1044 case Intrinsic::wasm_memory_atomic_notify:
1046 Info.memVT = MVT::i32;
1047 Info.ptrVal =
I.getArgOperand(0);
1058 case Intrinsic::wasm_memory_atomic_wait32:
1060 Info.memVT = MVT::i32;
1061 Info.ptrVal =
I.getArgOperand(0);
1066 case Intrinsic::wasm_memory_atomic_wait64:
1068 Info.memVT = MVT::i64;
1069 Info.ptrVal =
I.getArgOperand(0);
1074 case Intrinsic::wasm_loadf16_f32:
1076 Info.memVT = MVT::f16;
1077 Info.ptrVal =
I.getArgOperand(0);
1082 case Intrinsic::wasm_storef16_f32:
1084 Info.memVT = MVT::f16;
1085 Info.ptrVal =
I.getArgOperand(1);
1095void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
1098 switch (
Op.getOpcode()) {
1102 unsigned IntNo =
Op.getConstantOperandVal(0);
1106 case Intrinsic::wasm_bitmask: {
1108 EVT VT =
Op.getOperand(1).getSimpleValueType();
1111 Known.
Zero |= ZeroMask;
1121 case WebAssemblyISD::I64_ADD128:
1122 if (
Op.getResNo() == 1) {
1133WebAssemblyTargetLowering::getPreferredVectorAction(
MVT VT)
const {
1139 if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
1140 EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
1147bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
1148 SDValue Op,
const TargetLoweringOpt &TLO)
const {
1201WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
1213 "WebAssembly doesn't support language-specific or target-specific "
1214 "calling conventions yet");
1215 if (CLI.IsPatchPoint)
1216 fail(
DL, DAG,
"WebAssembly doesn't support patch point yet");
1218 if (CLI.IsTailCall) {
1219 auto NoTail = [&](
const char *Msg) {
1220 if (CLI.CB && CLI.CB->isMustTailCall())
1222 CLI.IsTailCall =
false;
1226 NoTail(
"WebAssembly 'tail-call' feature not enabled");
1230 NoTail(
"WebAssembly does not support varargs tail calls");
1240 bool TypesMatch = CallerRetTys.
size() == CalleeRetTys.
size() &&
1241 std::equal(CallerRetTys.
begin(), CallerRetTys.
end(),
1242 CalleeRetTys.
begin());
1244 NoTail(
"WebAssembly tail call requires caller and callee return types to "
1249 for (
auto &Arg : CLI.CB->args()) {
1250 Value *Val = Arg.get();
1254 if (
auto *
GEP = dyn_cast<GetElementPtrInst>(Src))
1255 Src =
GEP->getPointerOperand();
1260 if (isa<AllocaInst>(Val)) {
1262 "WebAssembly does not support tail calling with stack arguments");
1277 Outs[0].Flags.isSRet()) {
1282 bool HasSwiftSelfArg =
false;
1283 bool HasSwiftErrorArg =
false;
1284 unsigned NumFixedArgs = 0;
1285 for (
unsigned I = 0;
I < Outs.
size(); ++
I) {
1291 fail(
DL, DAG,
"WebAssembly hasn't implemented nest arguments");
1293 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca arguments");
1295 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs arguments");
1297 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last arguments");
1306 Chain = DAG.
getMemcpy(Chain,
DL, FINode, OutVal, SizeNode,
1317 bool IsVarArg = CLI.IsVarArg;
1326 if (!HasSwiftSelfArg) {
1329 Flags.setSwiftSelf();
1331 CLI.Outs.push_back(Arg);
1333 CLI.OutVals.push_back(ArgVal);
1335 if (!HasSwiftErrorArg) {
1338 Flags.setSwiftError();
1340 CLI.Outs.push_back(Arg);
1342 CLI.OutVals.push_back(ArgVal);
1353 for (
unsigned I = NumFixedArgs;
I < Outs.
size(); ++
I) {
1357 assert(VT != MVT::iPTR &&
"Legalized args should be concrete");
1362 CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
1369 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
1372 if (IsVarArg && NumBytes) {
1375 MaybeAlign StackAlign = Layout.getStackAlignment();
1376 assert(StackAlign &&
"data layout string is missing stack alignment");
1382 assert(ArgLocs[ValNo].getValNo() == ValNo &&
1383 "ArgLocs should remain in order and only hold varargs args");
1384 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
1392 if (!Chains.
empty())
1394 }
else if (IsVarArg) {
1418 IsVarArg ? OutVals.
begin() + NumFixedArgs : OutVals.
end());
1424 for (
const auto &In : Ins) {
1425 assert(!
In.Flags.isByVal() &&
"byval is not valid for return values");
1426 assert(!
In.Flags.isNest() &&
"nest is not valid for return values");
1427 if (
In.Flags.isInAlloca())
1428 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca return values");
1429 if (
In.Flags.isInConsecutiveRegs())
1430 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs return values");
1431 if (
In.Flags.isInConsecutiveRegsLast())
1433 "WebAssembly hasn't implemented cons regs last return values");
1442 CLI.CB->getCalledOperand()->getType())) {
1457 WebAssemblyISD::TABLE_SET,
DL, DAG.
getVTList(MVT::Other), TableSetOps,
1462 CLI.CB->getCalledOperand()->getPointerAlignment(DAG.
getDataLayout()),
1468 if (CLI.IsTailCall) {
1471 return DAG.
getNode(WebAssemblyISD::RET_CALL,
DL, NodeTys, Ops);
1478 for (
size_t I = 0;
I <
Ins.size(); ++
I)
1485bool WebAssemblyTargetLowering::CanLowerReturn(
1493SDValue WebAssemblyTargetLowering::LowerReturn(
1499 "MVP WebAssembly can only return up to one value");
1501 fail(
DL, DAG,
"WebAssembly doesn't support non-C calling conventions");
1504 RetOps.append(OutVals.
begin(), OutVals.
end());
1505 Chain = DAG.
getNode(WebAssemblyISD::RETURN,
DL, MVT::Other, RetOps);
1513 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca results");
1515 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs results");
1517 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last results");
1523SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1528 fail(
DL, DAG,
"WebAssembly doesn't support non-C calling conventions");
1537 bool HasSwiftErrorArg =
false;
1538 bool HasSwiftSelfArg =
false;
1540 HasSwiftSelfArg |=
In.Flags.isSwiftSelf();
1541 HasSwiftErrorArg |=
In.Flags.isSwiftError();
1542 if (
In.Flags.isInAlloca())
1543 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca arguments");
1544 if (
In.Flags.isNest())
1545 fail(
DL, DAG,
"WebAssembly hasn't implemented nest arguments");
1546 if (
In.Flags.isInConsecutiveRegs())
1547 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs arguments");
1548 if (
In.Flags.isInConsecutiveRegsLast())
1549 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last arguments");
1558 MFI->addParam(
In.VT);
1567 if (!HasSwiftSelfArg) {
1568 MFI->addParam(PtrVT);
1570 if (!HasSwiftErrorArg) {
1571 MFI->addParam(PtrVT);
1580 MFI->setVarargBufferVreg(VarargVreg);
1582 Chain,
DL, VarargVreg,
1583 DAG.
getNode(WebAssemblyISD::ARGUMENT,
DL, PtrVT,
1585 MFI->addParam(PtrVT);
1597 assert(MFI->getParams().size() == Params.
size() &&
1598 std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1604void WebAssemblyTargetLowering::ReplaceNodeResults(
1606 switch (
N->getOpcode()) {
1620 Results.push_back(Replace128Op(
N, DAG));
1624 "ReplaceNodeResults not implemented for this op for WebAssembly!");
1635 switch (
Op.getOpcode()) {
1640 return LowerFrameIndex(
Op, DAG);
1642 return LowerGlobalAddress(
Op, DAG);
1644 return LowerGlobalTLSAddress(
Op, DAG);
1646 return LowerExternalSymbol(
Op, DAG);
1648 return LowerJumpTable(
Op, DAG);
1650 return LowerBR_JT(
Op, DAG);
1652 return LowerVASTART(
Op, DAG);
1655 fail(
DL, DAG,
"WebAssembly hasn't implemented computed gotos");
1658 return LowerRETURNADDR(
Op, DAG);
1660 return LowerFRAMEADDR(
Op, DAG);
1662 return LowerCopyToReg(
Op, DAG);
1665 return LowerAccessVectorElement(
Op, DAG);
1669 return LowerIntrinsic(
Op, DAG);
1671 return LowerSIGN_EXTEND_INREG(
Op, DAG);
1674 return LowerEXTEND_VECTOR_INREG(
Op, DAG);
1676 return LowerBUILD_VECTOR(
Op, DAG);
1678 return LowerVECTOR_SHUFFLE(
Op, DAG);
1680 return LowerSETCC(
Op, DAG);
1684 return LowerShift(
Op, DAG);
1687 return LowerFP_TO_INT_SAT(
Op, DAG);
1689 return LowerLoad(
Op, DAG);
1691 return LowerStore(
Op, DAG);
1700 return LowerMUL_LOHI(
Op, DAG);
1702 return LowerUADDO(
Op, DAG);
1717 return std::nullopt;
1750 return DAG.
getNode(WebAssemblyISD::LOCAL_SET,
DL, Tys, Ops);
1755 "Encountered an unlowerable store to the wasm_var address space",
1771 "unexpected offset when loading from webassembly global",
false);
1782 "unexpected offset when loading from webassembly local",
false);
1789 assert(
Result->getNumValues() == 2 &&
"Loads must carry a chain!");
1795 "Encountered an unlowerable load from the wasm_var address space",
1804 assert(
Op.getValueType() == MVT::i64);
1807 switch (
Op.getOpcode()) {
1809 Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
1812 Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
1834 assert(
Op.getValueType() == MVT::i64);
1841 DAG.
getNode(WebAssemblyISD::I64_ADD128,
DL,
1842 DAG.
getVTList(MVT::i64, MVT::i64), LHS, Zero, RHS, Zero);
1852 assert(
N->getValueType(0) == MVT::i128);
1855 switch (
N->getOpcode()) {
1857 Opcode = WebAssemblyISD::I64_ADD128;
1860 Opcode = WebAssemblyISD::I64_SUB128;
1875 LHS_0, LHS_1, RHS_0, RHS_1);
1883 if (isa<FrameIndexSDNode>(Src.getNode())) {
1891 Register Reg = cast<RegisterSDNode>(
Op.getOperand(1))->getReg();
1892 EVT VT = Src.getValueType();
1894 : WebAssembly::COPY_I64,
1897 return Op.getNode()->getNumValues() == 1
1908 int FI = cast<FrameIndexSDNode>(
Op)->getIndex();
1918 "Non-Emscripten WebAssembly hasn't implemented "
1919 "__builtin_return_address");
1923 unsigned Depth =
Op.getConstantOperandVal(0);
1924 MakeLibCallOptions CallOptions;
1925 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS,
Op.getValueType(),
1926 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions,
DL)
1935 if (
Op.getConstantOperandVal(0) > 0)
1939 EVT VT =
Op.getValueType();
1946WebAssemblyTargetLowering::LowerGlobalTLSAddress(
SDValue Op,
1949 const auto *GA = cast<GlobalAddressSDNode>(
Op);
1976 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1977 : WebAssembly::GLOBAL_GET_I32;
1988 DAG.
getNode(WebAssemblyISD::WrapperREL,
DL, PtrVT, TLSOffset);
1995 EVT VT =
Op.getValueType();
1996 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2005 const auto *GA = cast<GlobalAddressSDNode>(
Op);
2006 EVT VT =
Op.getValueType();
2008 "Unexpected target flags on generic GlobalAddressSDNode");
2010 fail(
DL, DAG,
"Invalid address space for WebAssembly target");
2021 const char *BaseName;
2030 DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
2034 WebAssemblyISD::WrapperREL,
DL, VT,
2043 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2049WebAssemblyTargetLowering::LowerExternalSymbol(
SDValue Op,
2052 const auto *ES = cast<ExternalSymbolSDNode>(
Op);
2053 EVT VT =
Op.getValueType();
2054 assert(ES->getTargetFlags() == 0 &&
2055 "Unexpected target flags on generic ExternalSymbolSDNode");
2056 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2067 JT->getTargetFlags());
2074 const auto *
JT = cast<JumpTableSDNode>(
Op.getOperand(1));
2076 assert(
JT->getTargetFlags() == 0 &&
"WebAssembly doesn't set target flags");
2086 for (
auto *
MBB : MBBs)
2093 return DAG.
getNode(WebAssemblyISD::BR_TABLE,
DL, MVT::Other, Ops);
2102 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
2105 MFI->getVarargBufferVreg(), PtrVT);
2106 return DAG.
getStore(
Op.getOperand(0),
DL, ArgN,
Op.getOperand(1),
2114 if (
N->getConstantOperandVal(0) !=
2115 Intrinsic::experimental_vector_partial_reduce_add)
2118 assert(
N->getValueType(0) == MVT::v4i32 &&
"can only support v4i32");
2127 "expected widening mul");
2129 "expected mul to use the same extend for both operands");
2139 ExtendInLHS, ExtendInRHS);
2143 unsigned LowOpc = WebAssemblyISD::EXTEND_LOW_U;
2144 unsigned HighOpc = WebAssemblyISD::EXTEND_HIGH_U;
2158 "expected v16i8 input types");
2162 unsigned LowOpc = WebAssemblyISD::EXTEND_LOW_S;
2163 unsigned HighOpc = WebAssemblyISD::EXTEND_HIGH_S;
2169 DAG.
getNode(WebAssemblyISD::DOT,
DL, MVT::v4i32, LowLHS, LowRHS);
2171 DAG.
getNode(WebAssemblyISD::DOT,
DL, MVT::v4i32, HighLHS, HighRHS);
2176 unsigned LowOpc = WebAssemblyISD::EXTEND_LOW_U;
2177 unsigned HighOpc = WebAssemblyISD::EXTEND_HIGH_U;
2202 switch (
Op.getOpcode()) {
2205 IntNo =
Op.getConstantOperandVal(1);
2208 IntNo =
Op.getConstantOperandVal(0);
2219 case Intrinsic::wasm_lsda: {
2228 DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
2235 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
Node);
2238 case Intrinsic::wasm_shuffle: {
2242 Ops[
OpIdx++] =
Op.getOperand(1);
2243 Ops[
OpIdx++] =
Op.getOperand(2);
2244 while (
OpIdx < 18) {
2250 Ops[
OpIdx++] = MaskIdx;
2253 return DAG.
getNode(WebAssemblyISD::SHUFFLE,
DL,
Op.getValueType(), Ops);
2256 case Intrinsic::thread_pointer: {
2258 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2259 : WebAssembly::GLOBAL_GET_I32;
2270WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(
SDValue Op,
2284 const SDValue &Extract =
Op.getOperand(0);
2288 MVT ExtractedLaneT =
2289 cast<VTSDNode>(
Op.getOperand(1).getNode())->
getVT().getSimpleVT();
2292 if (ExtractedVecT == VecT)
2297 if (!isa<ConstantSDNode>(Index))
2299 unsigned IndexVal =
Index->getAsZExtVal();
2317 assert((UserOpc == WebAssemblyISD::EXTEND_LOW_U ||
2318 UserOpc == WebAssemblyISD::EXTEND_LOW_S) &&
2319 "expected extend_low");
2320 auto *Shuffle = cast<ShuffleVectorSDNode>(
Op.getNode());
2324 size_t FirstIdx = Mask.size() / 2;
2325 for (
size_t i = 0; i < Mask.size() / 2; ++i) {
2326 if (Mask[i] !=
static_cast<int>(FirstIdx + i)) {
2332 unsigned Opc = UserOpc == WebAssemblyISD::EXTEND_LOW_S
2333 ? WebAssemblyISD::EXTEND_HIGH_S
2334 : WebAssemblyISD::EXTEND_HIGH_U;
2335 return DAG.
getNode(
Opc,
DL, VT, Shuffle->getOperand(0));
2339WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(
SDValue Op,
2342 EVT VT =
Op.getValueType();
2344 EVT SrcVT = Src.getValueType();
2351 "Unexpected extension factor.");
2354 if (Scale != 2 && Scale != 4 && Scale != 8)
2358 switch (
Op.getOpcode()) {
2360 Ext = WebAssemblyISD::EXTEND_LOW_U;
2363 Ext = WebAssemblyISD::EXTEND_LOW_S;
2374 while (Scale != 1) {
2377 .widenIntegerVectorElementType(*DAG.
getContext())
2378 .getHalfNumVectorElementsVT(*DAG.
getContext()),
2388 if (
Op.getValueType() != MVT::v2f64)
2392 unsigned &Index) ->
bool {
2393 switch (
Op.getOpcode()) {
2395 Opcode = WebAssemblyISD::CONVERT_LOW_S;
2398 Opcode = WebAssemblyISD::CONVERT_LOW_U;
2401 Opcode = WebAssemblyISD::PROMOTE_LOW;
2407 auto ExtractVector =
Op.getOperand(0);
2411 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
2414 SrcVec = ExtractVector.getOperand(0);
2415 Index = ExtractVector.getConstantOperandVal(1);
2419 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
2421 if (!GetConvertedLane(
Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
2422 !GetConvertedLane(
Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
2425 if (LHSOpcode != RHSOpcode)
2429 switch (LHSOpcode) {
2430 case WebAssemblyISD::CONVERT_LOW_S:
2431 case WebAssemblyISD::CONVERT_LOW_U:
2432 ExpectedSrcVT = MVT::v4i32;
2434 case WebAssemblyISD::PROMOTE_LOW:
2435 ExpectedSrcVT = MVT::v4f32;
2441 auto Src = LHSSrcVec;
2442 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2445 ExpectedSrcVT,
DL, LHSSrcVec, RHSSrcVec,
2446 {
static_cast<int>(LHSIndex),
static_cast<int>(RHSIndex) + 4, -1, -1});
2448 return DAG.
getNode(LHSOpcode,
DL, MVT::v2f64, Src);
2453 MVT VT =
Op.getSimpleValueType();
2454 if (VT == MVT::v8f16) {
2469 const EVT VecT =
Op.getValueType();
2470 const EVT LaneT =
Op.getOperand(0).getValueType();
2472 bool CanSwizzle = VecT == MVT::v16i8;
2493 auto GetSwizzleSrcs = [](
size_t I,
const SDValue &Lane) {
2508 Index->getConstantOperandVal(1) !=
I)
2510 return std::make_pair(SwizzleSrc, SwizzleIndices);
2517 auto GetShuffleSrc = [&](
const SDValue &Lane) {
2520 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2522 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2528 using ValueEntry = std::pair<SDValue, size_t>;
2531 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>,
size_t>;
2534 using ShuffleEntry = std::pair<SDValue, size_t>;
2537 auto AddCount = [](
auto &Counts,
const auto &Val) {
2539 llvm::find_if(Counts, [&Val](
auto E) {
return E.first == Val; });
2540 if (CountIt == Counts.end()) {
2541 Counts.emplace_back(Val, 1);
2547 auto GetMostCommon = [](
auto &Counts) {
2549 assert(CommonIt != Counts.end() &&
"Unexpected all-undef build_vector");
2553 size_t NumConstantLanes = 0;
2556 for (
size_t I = 0;
I < Lanes; ++
I) {
2561 AddCount(SplatValueCounts, Lane);
2565 if (
auto ShuffleSrc = GetShuffleSrc(Lane))
2566 AddCount(ShuffleCounts, ShuffleSrc);
2568 auto SwizzleSrcs = GetSwizzleSrcs(
I, Lane);
2569 if (SwizzleSrcs.first)
2570 AddCount(SwizzleCounts, SwizzleSrcs);
2575 size_t NumSplatLanes;
2576 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2580 size_t NumSwizzleLanes = 0;
2581 if (SwizzleCounts.
size())
2582 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2583 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2587 SDValue ShuffleSrc1, ShuffleSrc2;
2588 size_t NumShuffleLanes = 0;
2589 if (ShuffleCounts.
size()) {
2590 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2592 [&](
const auto &Pair) {
return Pair.first == ShuffleSrc1; });
2594 if (ShuffleCounts.
size()) {
2595 size_t AdditionalShuffleLanes;
2596 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2597 GetMostCommon(ShuffleCounts);
2598 NumShuffleLanes += AdditionalShuffleLanes;
2603 std::function<
bool(
size_t,
const SDValue &)> IsLaneConstructed;
2606 if (NumSwizzleLanes >= NumShuffleLanes &&
2607 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2610 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2611 IsLaneConstructed = [&, Swizzled](
size_t I,
const SDValue &Lane) {
2612 return Swizzled == GetSwizzleSrcs(
I, Lane);
2614 }
else if (NumShuffleLanes >= NumConstantLanes &&
2615 NumShuffleLanes >= NumSplatLanes) {
2625 assert(LaneSize > DestLaneSize);
2626 Scale1 = LaneSize / DestLaneSize;
2632 assert(LaneSize > DestLaneSize);
2633 Scale2 = LaneSize / DestLaneSize;
2638 assert(DestLaneCount <= 16);
2639 for (
size_t I = 0;
I < DestLaneCount; ++
I) {
2641 SDValue Src = GetShuffleSrc(Lane);
2642 if (Src == ShuffleSrc1) {
2644 }
else if (Src && Src == ShuffleSrc2) {
2652 IsLaneConstructed = [&](size_t,
const SDValue &Lane) {
2653 auto Src = GetShuffleSrc(Lane);
2654 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2656 }
else if (NumConstantLanes >= NumSplatLanes) {
2658 for (
const SDValue &Lane :
Op->op_values()) {
2666 auto *
Const = dyn_cast<ConstantSDNode>(Lane.
getNode());
2667 int64_t Val =
Const ?
Const->getSExtValue() : 0;
2669 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
2670 "Unexpected out of bounds negative value");
2671 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2673 auto NewVal = (((
uint64_t)Val & Mask) - (1ll << LaneBits)) &
Mask;
2690 if (NumSplatLanes == 1 &&
Op->getOperand(0) == SplatValue &&
2691 (DestLaneSize == 32 || DestLaneSize == 64)) {
2698 IsLaneConstructed = [&SplatValue](
size_t _,
const SDValue &Lane) {
2699 return Lane == SplatValue;
2704 assert(IsLaneConstructed);
2707 for (
size_t I = 0;
I < Lanes; ++
I) {
2709 if (!Lane.
isUndef() && !IsLaneConstructed(
I, Lane))
2718WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(
SDValue Op,
2722 MVT VecType =
Op.getOperand(0).getSimpleValueType();
2729 Ops[
OpIdx++] =
Op.getOperand(0);
2730 Ops[
OpIdx++] =
Op.getOperand(1);
2733 for (
int M : Mask) {
2734 for (
size_t J = 0; J < LaneBytes; ++J) {
2743 return DAG.
getNode(WebAssemblyISD::SHUFFLE,
DL,
Op.getValueType(), Ops);
2751 assert(
Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2756 auto MakeLane = [&](
unsigned I) {
2762 {MakeLane(0), MakeLane(1)});
2766WebAssemblyTargetLowering::LowerAccessVectorElement(
SDValue Op,
2770 if (isa<ConstantSDNode>(IdxNode)) {
2783 EVT LaneT =
Op.getSimpleValueType().getVectorElementType();
2785 if (LaneT.
bitsGE(MVT::i32))
2789 size_t NumLanes =
Op.getSimpleValueType().getVectorNumElements();
2791 unsigned ShiftOpcode =
Op.getOpcode();
2797 for (
size_t i = 0; i < NumLanes; ++i) {
2800 SDValue ShiftedValue = ShiftedElements[i];
2805 DAG.
getNode(ShiftOpcode,
DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2815 assert(
Op.getSimpleValueType().isVector());
2817 uint64_t LaneBits =
Op.getValueType().getScalarSizeInBits();
2818 auto ShiftVal =
Op.getOperand(1);
2832 MaskVal == MaskBits)
2835 if (!isa<ConstantSDNode>(
RHS.getNode()))
2838 auto ConstantRHS = dyn_cast<ConstantSDNode>(
RHS.getNode());
2839 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2847 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2853 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2858 switch (
Op.getOpcode()) {
2860 Opcode = WebAssemblyISD::VEC_SHL;
2863 Opcode = WebAssemblyISD::VEC_SHR_S;
2866 Opcode = WebAssemblyISD::VEC_SHR_U;
2872 return DAG.
getNode(Opcode,
DL,
Op.getValueType(),
Op.getOperand(0), ShiftVal);
2877 EVT ResT =
Op.getValueType();
2878 EVT SatVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2880 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2881 (SatVT == MVT::i32 || SatVT == MVT::i64))
2884 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2887 if (ResT == MVT::v8i16 && SatVT == MVT::i16)
2898 auto &DAG = DCI.
DAG;
2899 auto Shuffle = cast<ShuffleVectorSDNode>(
N);
2905 SDValue Bitcast =
N->getOperand(0);
2908 if (!
N->getOperand(1).isUndef())
2910 SDValue CastOp = Bitcast.getOperand(0);
2912 EVT DstType = Bitcast.getValueType();
2917 SrcType,
SDLoc(
N), CastOp, DAG.
getUNDEF(SrcType), Shuffle->getMask());
2927 auto &DAG = DCI.
DAG;
2931 EVT InVT =
N->getOperand(0)->getValueType(0);
2932 EVT ResVT =
N->getValueType(0);
2934 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
2936 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
2950 auto &DAG = DCI.
DAG;
2954 EVT VT =
N->getValueType(0);
2968 auto &DAG = DCI.
DAG;
2974 auto Extract =
N->getOperand(0);
2978 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.
getOperand(1));
2979 if (IndexNode ==
nullptr)
2981 auto Index = IndexNode->getZExtValue();
2985 EVT ResVT =
N->getValueType(0);
2986 if (ResVT == MVT::v8i16) {
2988 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2990 }
else if (ResVT == MVT::v4i32) {
2992 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2994 }
else if (ResVT == MVT::v2i64) {
2996 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
3003 bool IsLow = Index == 0;
3005 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
3006 : WebAssemblyISD::EXTEND_HIGH_S)
3007 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
3008 : WebAssemblyISD::EXTEND_HIGH_U);
3015 auto &DAG = DCI.
DAG;
3017 auto GetWasmConversionOp = [](
unsigned Op) {
3020 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
3022 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
3024 return WebAssemblyISD::DEMOTE_ZERO;
3029 auto IsZeroSplat = [](
SDValue SplatVal) {
3030 auto *
Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
3031 APInt SplatValue, SplatUndef;
3032 unsigned SplatBitSize;
3037 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
3055 EVT ExpectedConversionType;
3058 switch (ConversionOp) {
3062 ExpectedConversionType = MVT::v2i32;
3066 ExpectedConversionType = MVT::v2f32;
3072 if (
N->getValueType(0) != ResVT)
3075 if (
Conversion.getValueType() != ExpectedConversionType)
3079 if (Source.getValueType() != MVT::v2f64)
3082 if (!IsZeroSplat(
N->getOperand(1)) ||
3083 N->getOperand(1).getValueType() != ExpectedConversionType)
3086 unsigned Op = GetWasmConversionOp(ConversionOp);
3102 auto ConversionOp =
N->getOpcode();
3103 switch (ConversionOp) {
3115 if (
N->getValueType(0) != ResVT)
3118 auto Concat =
N->getOperand(0);
3119 if (
Concat.getValueType() != MVT::v4f64)
3122 auto Source =
Concat.getOperand(0);
3123 if (Source.getValueType() != MVT::v2f64)
3126 if (!IsZeroSplat(
Concat.getOperand(1)) ||
3127 Concat.getOperand(1).getValueType() != MVT::v2f64)
3130 unsigned Op = GetWasmConversionOp(ConversionOp);
3136 const SDLoc &
DL,
unsigned VectorWidth) {
3144 unsigned ElemsPerChunk = VectorWidth / ElVT.
getSizeInBits();
3149 IdxVal &= ~(ElemsPerChunk - 1);
3154 Vec->
ops().slice(IdxVal, ElemsPerChunk));
3166 EVT SrcVT = In.getValueType();
3184 EVT InVT = MVT::i16, OutVT = MVT::i8;
3189 unsigned SubSizeInBits = SrcSizeInBits / 2;
3191 OutVT =
EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
3217 auto &DAG = DCI.
DAG;
3220 EVT InVT = In.getValueType();
3224 EVT OutVT =
N->getValueType(0);
3231 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
3232 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.
is128BitVector()))
3245 auto &DAG = DCI.
DAG;
3248 EVT VT =
N->getValueType(0);
3249 EVT SrcVT = Src.getValueType();
3260 if (NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) {
3263 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
3264 DAG.getSExtOrTrunc(N->getOperand(0), DL,
3265 SrcVT.changeVectorElementType(Width))}),
3270 if (NumElts == 32 || NumElts == 64) {
3278 m_CondCode(SetCond)))))
3296 MVT ReturnType = VectorsToShuffle.
size() == 2 ? MVT::i32 : MVT::i64;
3299 for (
SDValue V : VectorsToShuffle) {
3300 ReturningInteger = DAG.
getNode(
3309 return ReturningInteger;
3324 if (!sd_match(
N->getOperand(1),
3327 EVT LT =
LHS.getValueType();
3328 if (LT.getScalarSizeInBits() > 128 / LT.getVectorNumElements())
3331 auto CombineSetCC = [&
N, &DAG](Intrinsic::WASMIntrinsics InPre,
3333 Intrinsic::WASMIntrinsics InPost) {
3334 if (
N->getConstantOperandVal(0) != InPre)
3339 m_SpecificCondCode(SetType))))
3345 {DAG.getConstant(InPost, DL, MVT::i32), LHS}),
3348 Ret = DAG.
getNOT(
DL, Ret, MVT::i1);
3353 Intrinsic::wasm_alltrue))
3356 Intrinsic::wasm_anytrue))
3359 Intrinsic::wasm_anytrue))
3362 Intrinsic::wasm_alltrue))
3368template <
int MatchRHS,
ISD::CondCode MatchCond,
bool RequiresNegate,
3374 if (MatchCond != cast<CondCodeSDNode>(
Cond)->
get())
3377 if (MatchRHS != cast<ConstantSDNode>(
RHS)->getSExtValue())
3383 {DAG.getConstant(Intrin, DL, MVT::i32),
3384 DAG.getSExtOrTrunc(LHS->getOperand(0), DL, VecVT)}),
3387 Ret = DAG.
getNOT(
DL, Ret, MVT::i1);
3400 EVT VT =
N->getValueType(0);
3401 EVT OpVT =
X.getValueType();
3405 Attribute::NoImplicitFloat))
3408 ISD::CondCode CC = cast<CondCodeSDNode>(
N->getOperand(2))->get();
3411 !Subtarget->
hasSIMD128() || !isIntEqualitySetCC(CC))
3415 auto IsVectorBitCastCheap = [](
SDValue X) {
3417 return isa<ConstantSDNode>(
X) ||
X.getOpcode() ==
ISD::LOAD;
3420 if (!IsVectorBitCastCheap(
X) || !IsVectorBitCastCheap(
Y))
3430 : Intrinsic::wasm_anytrue,
3444 EVT VT =
N->getValueType(0);
3455 EVT FromVT =
LHS->getOperand(0).getValueType();
3460 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3463 if (!cast<ConstantSDNode>(
N->getOperand(1)))
3467 auto &DAG = DCI.
DAG;
3470 if (
auto Match = TryMatchTrue<0, ISD::SETNE, false, Intrinsic::wasm_anytrue>(
3476 if (
auto Match = TryMatchTrue<0, ISD::SETEQ, true, Intrinsic::wasm_anytrue>(
3482 if (
auto Match = TryMatchTrue<-1, ISD::SETEQ, false, Intrinsic::wasm_alltrue>(
3488 if (
auto Match = TryMatchTrue<-1, ISD::SETNE, true, Intrinsic::wasm_alltrue>(
3496 EVT VT =
N->getValueType(0);
3497 if (VT != MVT::v8i32 && VT != MVT::v16i32)
3503 if (
LHS.getOpcode() !=
RHS.getOpcode())
3510 if (
LHS->getOperand(0).getValueType() !=
RHS->getOperand(0).getValueType())
3513 EVT FromVT =
LHS->getOperand(0).getValueType();
3515 if (EltTy != MVT::i8)
3543 unsigned ExtendLowOpc =
3544 IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
3545 unsigned ExtendHighOpc =
3546 IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
3548 auto GetExtendLow = [&DAG, &
DL, &ExtendLowOpc](
EVT VT,
SDValue Op) {
3555 if (NumElts == 16) {
3556 SDValue LowLHS = GetExtendLow(MVT::v8i16, ExtendInLHS);
3557 SDValue LowRHS = GetExtendLow(MVT::v8i16, ExtendInRHS);
3563 GetExtendLow(MVT::v4i32, MulLow),
3565 GetExtendLow(MVT::v4i32, MulHigh),
3574 SDValue Lo = GetExtendLow(MVT::v4i32, MulLow);
3584 EVT VT =
N->getValueType(0);
3601 EVT MulVT = MVT::v8i16;
3603 if (VT == MVT::v8i8) {
3609 DAG.
getNode(WebAssemblyISD::EXTEND_LOW_U,
DL, MulVT, PromotedLHS);
3611 DAG.
getNode(WebAssemblyISD::EXTEND_LOW_U,
DL, MulVT, PromotedRHS);
3616 MVT::v16i8,
DL, MulLow, DAG.
getUNDEF(MVT::v16i8),
3617 {0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1});
3620 assert(VT == MVT::v16i8 &&
"Expected v16i8");
3624 DAG.
getNode(WebAssemblyISD::EXTEND_HIGH_U,
DL, MulVT,
LHS);
3626 DAG.
getNode(WebAssemblyISD::EXTEND_HIGH_U,
DL, MulVT,
RHS);
3635 VT,
DL, MulLow, MulHigh,
3636 {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
3641WebAssemblyTargetLowering::PerformDAGCombine(
SDNode *
N,
3642 DAGCombinerInfo &DCI)
const {
3643 switch (
N->getOpcode()) {
3670 return AnyAllCombine;
unsigned const MachineRegisterInfo * MRI
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
Register const TargetRegisterInfo * TRI
MachineInstr unsigned OpIdx
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Try to map an integer comparison with size > XLEN to vector instructions before type legalization splits it up into chunked operations.
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static bool callingConvSupported(CallingConv::ID CallConv)
static SDValue TryWideExtMulCombine(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * LowerMemcpy(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performVectorNonNegToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static SDValue performAnyAllCombine(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue TryMatchTrue(SDNode *N, EVT VecVT, SelectionDAG &DAG)
static SDValue GetExtendHigh(SDValue Op, unsigned UserOpc, EVT VT, SelectionDAG &DAG)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static MachineBasicBlock * LowerMemset(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instructions.
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
SDValue performLowerPartialReduction(SDNode *N, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
static constexpr int Concat[]
Class for arbitrary precision integers.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
LLVM Basic Block Representation.
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
This class represents a function call, abstracting a target machine's calling convention.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
Diagnostic information for unsupported feature in backend.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
int64_t getOffset() const
LLVM_ABI unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Type * getValueType() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
@ INVALID_SIMPLE_VALUE_TYPE
static auto integer_fixedlen_vector_valuetypes()
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
mop_range defs()
Returns all explicit operands that are register definitions.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mop_range uses()
Returns all operands which may be register uses.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
bool isFunctionTy() const
True if this is an instance of FunctionType.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
Register getFrameRegister(const MachineFunction &MF) const override
bool hasCallIndirectOverlong() const
const Triple & getTargetTriple() const
const WebAssemblyInstrInfo * getInstrInfo() const override
bool hasBulkMemory() const
const WebAssemblyRegisterInfo * getRegisterInfo() const override
bool hasWideArithmetic() const
bool hasReferenceTypes() const
bool hasExceptionHandling() const
bool hasNontrappingFPToInt() const
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Swift
Calling convention for Swift.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ CXX_FAST_TLS
Used for access functions.
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
@ WASM_ADDRESS_SPACE_EXTERNREF
@ WASM_ADDRESS_SPACE_FUNCREF
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the table represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
bool isValidAddressSpace(unsigned AS)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a po...
bool isWasmVarAddressSpace(unsigned AS)
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
@ Mul
Product of integers.
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
constexpr unsigned BitWidth
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInConsecutiveRegs() const
Align getNonZeroOrigAlign() const
bool isSwiftError() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
Align getNonZeroByValAlign() const
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
unsigned getBitWidth() const
Get the bit width of this value.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
bool isBeforeLegalize() const
Function object to check whether the second component of a container supported by std::get (like std:...