#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
34 "riscv-use-rematerializable-movimm",
cl::Hidden,
35 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
36 "constant materialization"),
#define GET_DAGISEL_BODY RISCVDAGToDAGISel
#include "RISCVGenDAGISel.inc"
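// GET_DAGISEL_BODY splices the TableGen-generated matcher tables and
// SelectCode() implementation from RISCVGenDAGISel.inc into
// RISCVDAGToDAGISel.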
  bool MadeChange = false;

  switch (N->getOpcode()) {

  MVT VT = N->getSimpleValueType(0);
  unsigned Opc =
      VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
  case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);

    Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&

    int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();

    MVT::i64, MPI, Align(8),
    MVT VT = N->getSimpleValueType(0);

    LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
  bool MadeChange = false;

  if (N->use_empty() || !N->isMachineOpcode())
    continue;

  MadeChange |= doPeepholeSExtW(N);

  MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));

  MadeChange |= doPeepholeNoRegPassThru();
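// Each post-isel peephole reports whether it rewrote anything; a true result
// tells the pass to run dead-node elimination over the DAG afterwards.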
  switch (Inst.getOpndKind()) {

  if (Seq.size() > 3) {
    unsigned ShiftAmt, AddOpc;
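// When the materialization sequence exceeds three instructions, selectImm
// tries to rebuild the constant from a cheaper sub-constant plus one final
// shift or add.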
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);

  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  SDValue Mask = Node->getOperand(CurOp++);

  Policy = Node->getConstantOperandVal(CurOp++);
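// All RVV memory pseudos share this operand layout: base pointer, optional
// stride or index, optional mask, then VL, SEW and, for loads, a policy
// immediate, with the chain appended last.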
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

  Operands.push_back(Node->getOperand(CurOp++));

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MVT VT = Node->getSimpleValueType(0);

  unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

  Operands.push_back(Node->getOperand(CurOp++));

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

  Operands.push_back(Node->getOperand(CurOp++));

  if (DecodedLMUL.second)
    ContainedTyNumElts /= DecodedLMUL.first;
  else
    ContainedTyNumElts *= DecodedLMUL.first;
  assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() &&
         "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
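// The index vector must supply exactly one index per data element, hence the
// LMUL-scaled element-count check above.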
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

  Operands.push_back(Node->getOperand(CurOp++));

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);

  Operands.push_back(Node->getOperand(CurOp++));

  if (DecodedLMUL.second)
    ContainedTyNumElts /= DecodedLMUL.first;
  else
    ContainedTyNumElts *= DecodedLMUL.first;
  assert(ContainedTyNumElts == IndexVT.getVectorMinNumElements() &&
         "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  unsigned IntNo = Node->getConstantOperandVal(0);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
  unsigned Offset = (VLMax ? 1 : 2);

  "Unexpected number of operands");

  Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {

  Opcode = RISCV::PseudoVSETVLIX0;

  VLOperand = Node->getOperand(1);

  if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
    if (isUInt<5>(AVL)) {

  XLenVT, VLImm, VTypeIOp));
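// A uimm5 AVL selects PseudoVSETIVLI; a VLMAX request selects
// PseudoVSETVLIX0 (x0 AVL); anything else falls back to the register-AVL
// PseudoVSETVLI.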
  MVT VT = Node->getSimpleValueType(0);
  unsigned Opcode = Node->getOpcode();

  "Unexpected opcode");

  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);

  bool SignExt = false;

  uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
    return false;

  int64_t ShiftedVal = Val >> ShAmt;
  if (!isInt<12>(ShiftedVal))
    return false;

  if (SignExt && ShAmt >= 32)
    return false;

  case ISD::AND: BinOpc = RISCV::ANDI; break;
  case ISD::OR:  BinOpc = RISCV::ORI;  break;
  case ISD::XOR: BinOpc = RISCV::XORI; break;

  unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
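// The rewrite is (shl X, ShAmt) op Imm -> shl (X op (Imm >> ShAmt)), ShAmt,
// which lets the shifted-down immediate fit ANDI/ORI/XORI's simm12 field.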
  if (Subtarget->hasVendorXTHeadBb())
    Opc = RISCV::TH_EXT;
  else if (Subtarget->hasVendorXAndesPerf())
    Opc = RISCV::NDS_BFOS;
  else if (Subtarget->hasVendorXqcibm())
    Opc = RISCV::QC_EXT;

  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

  SDValue N0 = Node->getOperand(0);

  auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb,
                             const SDLoc &DL, MVT VT) {
    if (Opc == RISCV::QC_EXT) {

  MVT VT = Node->getSimpleValueType(0);
  const unsigned RightShAmt = N1C->getZExtValue();

  auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));

  const unsigned LeftShAmt = N01C->getZExtValue();

  if (LeftShAmt > RightShAmt)
    return false;

  const unsigned Msb = MsbPlusOne - 1;
  const unsigned Lsb = RightShAmt - LeftShAmt;

  SDNode *Sbe = BitfieldExtract(N0, Msb, Lsb, DL, VT);

  const unsigned ExtSize =
      cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();

  const unsigned Msb = ExtSize - 1;

  const unsigned Lsb = RightShAmt > Msb ? Msb : RightShAmt;

  SDNode *Sbe = BitfieldExtract(N0, Msb, Lsb, DL, VT);
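// Both paths fold a shift pair (or sext_inreg plus shift) into a single
// vendor signed bitfield-extract covering bits [Msb:Lsb].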
  if (!Subtarget->hasVendorXqcibm())
    return false;

  using namespace SDPatternMatch;

  unsigned ShAmt, Width;

  if (Width == 1 && Subtarget->hasStdExtZbs())
    return false;

  MVT VT = Node->getSimpleValueType(0);
  if (!Subtarget->hasVendorXqcibm())
    return false;

  using namespace SDPatternMatch;

  APInt MaskImm, OrImm;

  unsigned ShAmt, Width;

  MVT VT = Node->getSimpleValueType(0);

  auto Opc = RISCV::QC_INSB;

  if (isInt<5>(LIImm)) {
    Opc = RISCV::QC_INSBI;
  if (!Subtarget->hasVendorXAndesPerf())
    return false;

  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

  SDValue N0 = Node->getOperand(0);

  auto BitfieldInsert = [&](SDValue N0, unsigned Msb, unsigned Lsb,
                            const SDLoc &DL, MVT VT) {
    unsigned Opc = RISCV::NDS_BFOS;

  MVT VT = Node->getSimpleValueType(0);
  const unsigned RightShAmt = N1C->getZExtValue();

  auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));

  const unsigned LeftShAmt = N01C->getZExtValue();

  if (LeftShAmt <= RightShAmt)
    return false;

  const unsigned Msb = MsbPlusOne - 1;
  const unsigned Lsb = LeftShAmt - RightShAmt;

  SDNode *Sbi = BitfieldInsert(N0, Msb, Lsb, DL, VT);
  if (!Subtarget->hasVendorXqcibm())
    return false;

  using namespace SDPatternMatch;

  m_ConstInt(CMask))))))

  unsigned Width, ShAmt;

  auto Opc = RISCV::QC_INSB;

  Opc = RISCV::QC_INSBI;
  if (Subtarget->hasVendorXTHeadBb()) {
    Opc = RISCV::TH_EXTU;
  } else if (Subtarget->hasVendorXAndesPerf()) {
    Opc = RISCV::NDS_BFOZ;
  } else if (Subtarget->hasVendorXqcibm()) {
    Opc = RISCV::QC_EXTU;
  if (!Subtarget->hasVendorXAndesPerf())
    return false;

  unsigned Opc = RISCV::NDS_BFOZ;
  if (!Subtarget->hasVendorXTHeadMemIdx())
    return false;

  "Unexpected addressing mode");

  int64_t Offset = C->getSExtValue();

  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
      break;

  if (LoadVT == MVT::i8 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
  else if (LoadVT == MVT::i8 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
  else if (LoadVT == MVT::i16 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
  else if (LoadVT == MVT::i16 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
  else if (LoadVT == MVT::i32 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
  else if (LoadVT == MVT::i32 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
  else if (LoadVT == MVT::i64 && IsPre)
    Opcode = RISCV::TH_LDIB;
  else if (LoadVT == MVT::i64 && IsPost)
    Opcode = RISCV::TH_LDIA;
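// XTheadMemIdx scales its simm5 increment by 1 << Shift (0-3), so the loop
// above picks the smallest shift that keeps the offset representable and
// exactly divisible.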
  unsigned IntNo = Node->getConstantOperandVal(1);

  assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
          IntNo == Intrinsic::riscv_sf_vc_i_se) &&
         "Unexpected SiFive VC intrinsic");

  unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));

  Node->getOperand(4), Node->getOperand(5),
  Node->getOperand(8), SEWOp,
  Node->getOperand(0)};

  auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
  switch (LMulSDNode->getSExtValue()) {

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF8
                                                : RISCV::PseudoSF_VC_I_SE_MF8;

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF4
                                                : RISCV::PseudoSF_VC_I_SE_MF4;

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF2
                                                : RISCV::PseudoSF_VC_I_SE_MF2;

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M1
                                                : RISCV::PseudoSF_VC_I_SE_M1;

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M2
                                                : RISCV::PseudoSF_VC_I_SE_M2;

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M4
                                                : RISCV::PseudoSF_VC_I_SE_M4;

  Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M8
                                                : RISCV::PseudoSF_VC_I_SE_M8;

  ReplaceNode(Node, CurDAG->getMachineNode(
                        Opcode, DL, Node->getSimpleValueType(0), Operands));
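// Operand 7 carries the encoded LMUL; the switch maps it onto the matching
// LMUL-suffixed PseudoSF_VC_*_SE pseudo.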
#define INST_NF_CASE(NAME, NF)                                                 \
  case Intrinsic::riscv_##NAME##NF:                                            \
    return NF;
#define INST_NF_CASE_MASK(NAME, NF)                                            \
  case Intrinsic::riscv_##NAME##NF##_mask:                                     \
    return NF;
#define INST_NF_CASE_FF(NAME, NF)                                              \
  case Intrinsic::riscv_##NAME##NF##ff:                                        \
    return NF;
#define INST_NF_CASE_FF_MASK(NAME, NF)                                         \
  case Intrinsic::riscv_##NAME##NF##ff_mask:                                   \
    return NF;
#define INST_ALL_NF_CASE_BASE(MACRO_NAME, NAME)                                \
  MACRO_NAME(NAME, 2)                                                          \
  MACRO_NAME(NAME, 3)                                                          \
  MACRO_NAME(NAME, 4)                                                          \
  MACRO_NAME(NAME, 5)                                                          \
  MACRO_NAME(NAME, 6)                                                          \
  MACRO_NAME(NAME, 7)                                                          \
  MACRO_NAME(NAME, 8)
#define INST_ALL_NF_CASE(NAME)                                                 \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE, NAME)                                    \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE_MASK, NAME)
#define INST_ALL_NF_CASE_WITH_FF(NAME)                                         \
  INST_ALL_NF_CASE(NAME)                                                       \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF, NAME)                                 \
  INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF_MASK, NAME)
  switch (Intrinsic) {
  if (Node->isMachineOpcode()) {
    Node->setNodeId(-1);
    return; // Already selected.
  }

  unsigned Opcode = Node->getOpcode();

  MVT VT = Node->getSimpleValueType(0);

  bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();

  assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
  auto *ConstNode = cast<ConstantSDNode>(Node);
  if (ConstNode->isZero()) {

  int64_t Imm = ConstNode->getSExtValue();

  if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) && hasAllBUsers(Node))
    Imm = SignExtend64<8>(Imm);

  if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
      hasAllHUsers(Node))
    Imm = SignExtend64<16>(Imm);

  if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
    Imm = SignExtend64<32>(Imm);
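// Sign-extending the immediate when every user only reads its low 8/16/32
// bits lets the materializer pick a shorter instruction sequence.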
  const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();

  bool Is64Bit = Subtarget->is64Bit();
  bool HasZdinx = Subtarget->hasStdExtZdinx();

  bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;

  if (VT == MVT::f64 && HasZdinx && !Is64Bit)

  assert(Subtarget->hasStdExtZfbfmin());
  Opc = RISCV::FMV_H_X;

  Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;

  Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;

  Opc = Is64Bit ? RISCV::FMV_D_X : RISCV::FCVT_D_W;

  if (VT.SimpleTy == MVT::f16 && Opc == RISCV::COPY) {
  } else if (VT.SimpleTy == MVT::f32 && Opc == RISCV::COPY) {
  } else if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)

  Opc = RISCV::FSGNJN_D;

  Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
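// f64 -0.0 cannot come straight from an integer move, so it is built as
// +0.0 and negated with a sign-injection (fsgnjn.d or its Zdinx form).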
  case RISCVISD::BuildGPRPair:
  case RISCVISD::BuildPairF64: {
    if (Opcode == RISCVISD::BuildPairF64 && !Subtarget->hasStdExtZdinx())
      break;

    assert((!Subtarget->is64Bit() || Opcode == RISCVISD::BuildGPRPair) &&
           "BuildPairF64 only handled here on rv32i_zdinx");

    Node->getOperand(0),
    Node->getOperand(1),
  case RISCVISD::SplitGPRPair:
  case RISCVISD::SplitF64: {
    if (Subtarget->hasStdExtZdinx() || Opcode != RISCVISD::SplitF64) {
      assert((!Subtarget->is64Bit() || Opcode == RISCVISD::SplitGPRPair) &&
             "SplitF64 only handled here on rv32i_zdinx");

      if (!SDValue(Node, 0).use_empty()) {
        Node->getValueType(0),
        Node->getOperand(0));

      if (!SDValue(Node, 1).use_empty()) {
        RISCV::sub_gpr_odd, DL, Node->getValueType(1), Node->getOperand(0));

    assert(Opcode != RISCVISD::SplitGPRPair &&
           "SplitGPRPair should already be handled");
    if (!Subtarget->hasStdExtZfa())
      break;

    "Unexpected subtarget");

    if (!SDValue(Node, 0).use_empty()) {
      Node->getOperand(0));

    if (!SDValue(Node, 1).use_empty()) {
      Node->getOperand(0));
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    unsigned ShAmt = N1C->getZExtValue();

    unsigned XLen = Subtarget->getXLen();

    if (ShAmt <= 32 && TrailingZeros > 0 && LeadingZeros == 32) {

    if (TrailingZeros == 0 && LeadingZeros > ShAmt &&
        XLen - LeadingZeros > 11 && LeadingZeros != 32) {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    unsigned ShAmt = N1C->getZExtValue();

    unsigned XLen = Subtarget->getXLen();

    if (LeadingZeros == 32 && TrailingZeros > ShAmt) {

    Mask |= maskTrailingOnes<uint64_t>(ShAmt);

    if (ShAmt >= TrailingOnes)
      break;

    if (TrailingOnes == 32) {
      Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,

    if (HasBitTest && ShAmt + 1 == TrailingOnes) {
      Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,

    const unsigned Msb = TrailingOnes - 1;
    const unsigned Lsb = ShAmt;

    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    unsigned ShAmt = N1C->getZExtValue();
    unsigned ExtSize =
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();

    if (ExtSize >= 32 || ShAmt >= ExtSize)
      break;
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
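// (sra (sext_inreg X, VT), C) is selected as slli+srai: shift the field up
// to the sign bit, then arithmetic-shift back down with the extension
// folded in.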
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));

    unsigned C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

    bool IsCANDI = isInt<6>(N1C->getSExtValue());

    C1 &= maskTrailingZeros<uint64_t>(C2);

    C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    if (C2 + 32 == Leading) {

    if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
        cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {

      RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),

    const unsigned Lsb = C2;

    bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
        cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;

    Skip |= HasBitTest && Leading == XLen - 1;
    if (OneUseOrZExtW && !Skip) {
      RISCV::SLLI, DL, VT, X,

    if (C2 + Leading < XLen &&
        C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {

      if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {

      const unsigned Msb = XLen - Leading - 1;
      const unsigned Lsb = C2;

      if (OneUseOrZExtW && !IsCANDI) {
        RISCV::SLLI, DL, VT, X,

    if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
      unsigned SrliOpc = RISCV::SRLI;
      isa<ConstantSDNode>(X.getOperand(1)) &&
          X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
        SrliOpc = RISCV::SRLIW;
        X = X.getOperand(0);

    if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
        OneUseOrZExtW && !IsCANDI) {
      RISCV::SRLIW, DL, VT, X,

    if (Trailing > 0 && Leading + Trailing == 32 && C2 + Trailing < XLen &&
        OneUseOrZExtW && Subtarget->hasStdExtZba()) {
      RISCV::SRLI, DL, VT, X,
      RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),

    if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
      RISCV::SRLI, DL, VT, X,

    if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
      RISCV::SRLIW, DL, VT, X,

    if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
        Subtarget->hasStdExtZba()) {
      RISCV::SRLI, DL, VT, X,
      RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
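// Each branch above trades the AND mask for a srli/slli (or srliw/slli.uw)
// pair chosen from the mask's leading and trailing zero counts, avoiding a
// constant materialization.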
    const uint64_t C1 = N1C->getZExtValue();

    unsigned XLen = Subtarget->getXLen();
    assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

    bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
        isa<ConstantSDNode>(X.getOperand(1)) &&
        X.getConstantOperandVal(1) == 32;

      RISCV::SRAI, DL, VT, X,

    if (C2 > Leading && Leading > 0 && Trailing > 0) {

    if (isMask_64(C1) && !isInt<12>(N1C->getSExtValue()) &&
        !(C1 == 0xffff && Subtarget->hasStdExtZbb()) &&
        !(C1 == 0xffffffff && Subtarget->hasStdExtZba())) {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    SDValue N0 = Node->getOperand(0);

    (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());

    IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
      break;

    bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();

    IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
      break;

    unsigned XLen = Subtarget->getXLen();

    unsigned ConstantShift = XLen - LeadingZeros;

    uint64_t ShiftedC1 = C1 << ConstantShift;

    ShiftedC1 = SignExtend64<32>(ShiftedC1);
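// (mul (and X, C2), C1) with a leading-zero mask is turned into a high
// multiply against C1 shifted left by the mask's leading-zero count, so the
// zero-extension folds into the multiply.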
    if (Subtarget->hasVendorXCVmem() && !Subtarget->is64Bit()) {

      SDValue Chain = Node->getOperand(0);

      bool Simm12 = false;
      bool SignExtend = Load->getExtensionType() == ISD::SEXTLOAD;

      if (auto ConstantOffset = dyn_cast<ConstantSDNode>(Offset)) {
        int ConstantVal = ConstantOffset->getSExtValue();
        Simm12 = isInt<12>(ConstantVal);

      unsigned Opcode = 0;
      switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
      case MVT::i8:
        if (Simm12 && SignExtend)
          Opcode = RISCV::CV_LB_ri_inc;
        else if (Simm12 && !SignExtend)
          Opcode = RISCV::CV_LBU_ri_inc;
        else if (!Simm12 && SignExtend)
          Opcode = RISCV::CV_LB_rr_inc;
        else
          Opcode = RISCV::CV_LBU_rr_inc;
        break;
      case MVT::i16:
        if (Simm12 && SignExtend)
          Opcode = RISCV::CV_LH_ri_inc;
        else if (Simm12 && !SignExtend)
          Opcode = RISCV::CV_LHU_ri_inc;
        else if (!Simm12 && SignExtend)
          Opcode = RISCV::CV_LH_rr_inc;
        else
          Opcode = RISCV::CV_LHU_rr_inc;
        break;
      case MVT::i32:
        if (Simm12)
          Opcode = RISCV::CV_LW_ri_inc;
        else
          Opcode = RISCV::CV_LW_rr_inc;
        break;
  case RISCVISD::LD_RV32: {
    assert(Subtarget->hasStdExtZilsd() && "LD_RV32 is only used with Zilsd");

    SDValue Chain = Node->getOperand(0);

    RISCV::LD_RV32, DL, {MVT::Untyped, MVT::Other}, Ops);

  case RISCVISD::SD_RV32: {
    SDValue Chain = Node->getOperand(0);
    unsigned IntNo = Node->getConstantOperandVal(0);

    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpConstant = false;
      bool IsCmpMinimum = false;

      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        IsCmpConstant = true;
        CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)

          IsCmpMinimum = true;

        IsCmpMinimum = true;

      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode, VMSGTOpcode;

#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
  case RISCVVType::lmulenum:                                                   \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix                 \
                             : RISCV::PseudoVMSGT_VX_##suffix;                 \
    break;
#undef CASE_VMSLT_OPCODES

#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)                            \
  case RISCVVType::lmulenum:                                                   \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix;                               \
    break;
#undef CASE_VMNAND_VMSET_OPCODES

      if (IsCmpConstant) {
        {Src1, Imm, VL, SEW}));

        {Cmp, Cmp, VL, MaskSEW}));
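// vmsge(u) has no hardware encoding: it is emitted as vmslt(u) followed by a
// mask complement (vmnand), or as vmsgt(u) when the constant can be adjusted
// by one and still fit.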
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpConstant = false;
      bool IsCmpMinimum = false;

      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        IsCmpConstant = true;
        CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)

          IsCmpMinimum = true;

        IsCmpMinimum = true;

      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode, VMSGTMaskOpcode;

#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
  case RISCVVType::lmulenum:                                                   \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSGT_VX_##suffix##_MASK;      \
    break;
#undef CASE_VMSLT_OPCODES

#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVVType::lmulenum:                                                   \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES

      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      if (Mask == MaskedOff) {
        {Mask, MaskedOff, VL, MaskSEW}));

      if (Mask == MaskedOff) {
        {Mask, Cmp, VL, MaskSEW}));

      if (IsCmpConstant) {
        VMSGTMaskOpcode, DL, VT,
        {MaskedOff, Src1, Imm, Mask, VL, SEW, PolicyOp}));

        {MaskedOff, Src1, Src2, Mask,
         VL, SEW, PolicyOp}),
        {Cmp, Mask, VL, MaskSEW}));
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    unsigned IntNo = Node->getConstantOperandVal(1);

    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {

    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {

    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {

    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {

    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:

    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:

    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:

    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {

    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {

    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);

      Operands.push_back(Node->getOperand(CurOp++));

      "Element count mismatch");

      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);

      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;

      if (HasPassthruOperand)
        Operands.push_back(Node->getOperand(CurOp++));

      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));

    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);

      Operands.push_back(Node->getOperand(CurOp++));

      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
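// The fault-only-first form (vleff) traps only on element 0 and truncates VL
// at the first faulting element, which is why it needs a distinct pseudo.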
    case Intrinsic::riscv_nds_vln:
    case Intrinsic::riscv_nds_vln_mask:
    case Intrinsic::riscv_nds_vlnu:
    case Intrinsic::riscv_nds_vlnu_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_nds_vln_mask ||
                      IntNo == Intrinsic::riscv_nds_vlnu_mask;
      bool IsUnsigned = IntNo == Intrinsic::riscv_nds_vlnu ||
                        IntNo == Intrinsic::riscv_nds_vlnu_mask;

      MVT VT = Node->getSimpleValueType(0);

      Operands.push_back(Node->getOperand(CurOp++));

      IsMasked, IsUnsigned, Log2SEW, static_cast<unsigned>(LMUL));

    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    unsigned IntNo = Node->getConstantOperandVal(1);

    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {

    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {

    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {

    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {

    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:

    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:

    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:

    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:

    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);

      Operands.push_back(Node->getOperand(CurOp++));

      "Element count mismatch");

      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
          IsMasked, IsOrdered, IndexLog2EEW,
          static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
    case Intrinsic::riscv_vsm:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);

      Operands.push_back(Node->getOperand(CurOp++));

      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
    case Intrinsic::riscv_sf_vc_x_se:
    case Intrinsic::riscv_sf_vc_i_se:
      return selectSF_VC_X_SE(Node);

    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
  case RISCVISD::TUPLE_INSERT: {
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);

    auto Idx = Node->getConstantOperandVal(2);

    MVT SubVecContainerVT = SubVecVT;

    SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);

    [[maybe_unused]] bool ExactlyVecRegSized =
        Subtarget->expandVScale(SubVecVT.getSizeInBits())
            .isKnownMultipleOf(Subtarget->expandVScale(VecRegSize));
    .getKnownMinValue()));
    assert(Idx == 0 && (ExactlyVecRegSized || V.isUndef()));

    MVT ContainerVT = VT;

    ContainerVT = TLI.getContainerForFixedLengthVector(VT);

    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            ContainerVT, SubVecContainerVT, Idx, TRI);

    [[maybe_unused]] bool IsSubVecPartReg =

    assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg ||
            V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");

    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID =
          RISCVTargetLowering::getRegClassIDForVecVT(ContainerVT);
      "Unexpected subvector extraction");
  case RISCVISD::TUPLE_EXTRACT: {
    SDValue V = Node->getOperand(0);
    auto Idx = Node->getConstantOperandVal(1);
    MVT InVT = V.getSimpleValueType();

    MVT SubVecContainerVT = VT;

    SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);

    InVT = TLI.getContainerForFixedLengthVector(InVT);

    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            InVT, SubVecContainerVT, Idx, TRI);

    if (SubRegIdx == RISCV::NoSubRegister) {
      "Unexpected subvector extraction");
  case RISCVISD::VMV_S_X_VL:
  case RISCVISD::VFMV_S_F_VL:
  case RISCVISD::VMV_V_X_VL:
  case RISCVISD::VFMV_V_F_VL: {
    bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
                        Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
    if (!Node->getOperand(0).isUndef())
      break;
    SDValue Src = Node->getOperand(1);
    auto *Ld = dyn_cast<LoadSDNode>(Src);

    if (!Ld || Ld->isIndexed())
      break;
    EVT MemVT = Ld->getMemoryVT();

    if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
      break;

    Operands.append({VL, SEW, PolicyOp, Ld->getChain()});

    /*IsMasked*/ false, IsStrided, /*FF*/ false,
    Log2SEW, static_cast<unsigned>(LMUL));
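// A splat whose scalar comes straight from a load is selected as a
// zero-strided vector load (stride x0): one memory read replicated into
// every element.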
    unsigned Locality = Node->getConstantOperandVal(3);

    auto *LoadStoreMem = cast<MemSDNode>(Node);

    int NontemporalLevel = 0;

    NontemporalLevel = 3;

    NontemporalLevel = 1;

    NontemporalLevel = 0;

    if (NontemporalLevel & 0b1)

    if (NontemporalLevel & 0b10)
                                      std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {

  assert(Found && "SelectAddrRegImm should always succeed");
  OutOps.push_back(Op0);
  OutOps.push_back(Op1);

  OutOps.push_back(Op);
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
                               bool IsPrefetch = false) {
  if (!isa<ConstantSDNode>(Addr))
    return false;

  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();

  int64_t Lo12 = SignExtend64<12>(CVal);

  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (IsPrefetch && (Lo12 & 0b11111) != 0)
      return false;

    int64_t Hi20 = (Hi >> 12) & 0xfffff;

  if (Seq.back().getOpcode() != RISCV::ADDI)
    return false;
  Lo12 = Seq.back().getImm();
  if (IsPrefetch && (Lo12 & 0b11111) != 0)
    return false;

  assert(!Seq.empty() && "Expected more instructions in sequence");
  for (auto *User : Add->users()) {
    User->getOpcode() != RISCVISD::LD_RV32 &&
        User->getOpcode() != RISCVISD::SD_RV32 &&

    EVT VT = cast<MemSDNode>(User)->getMemoryVT();

    cast<StoreSDNode>(User)->getValue() == Add)

    cast<AtomicSDNode>(User)->getVal() == Add)

    if (User->getOpcode() == RISCVISD::SD_RV32 &&
  MVT VT = Addr.getSimpleValueType();

  if (Addr.getOpcode() == RISCVISD::ADD_LO) {

    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {

      if (Base.getOpcode() == RISCVISD::ADD_LO) {
        SDValue LoOperand = Base.getOperand(1);
        if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
          GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
          if ((CVal == 0 || Alignment > CVal)) {
            int64_t CombinedOffset = CVal + GA->getOffset();
            CombinedOffset, GA->getTargetFlags());

      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))

  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!isInt<12>(CVal) && "simm12 not already handled?");

    if (CVal >= -4096 && CVal <= 4094) {
      int64_t Adj = CVal < 0 ? -2048 : 2047;
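// An offset just outside simm12 is split into Adj (-2048 or 2047) plus a
// simm12 remainder, so the address still folds into one ADDI plus the
// memory operand's immediate.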
  MVT VT = Addr.getSimpleValueType();

  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isUInt<9>(CVal)) {

    if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
  MVT VT = Addr.getSimpleValueType();

  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {

    if ((CVal & 0b11111) != 0) {

    if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))

  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!isInt<12>(CVal) && "simm12 not already handled?");

    if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
      int64_t Adj = CVal < 0 ? -2048 : 2016;
      int64_t AdjustedOffset = CVal - Adj;

      RISCV::ADDI, DL, VT, Addr.getOperand(0),
  EVT VT = cast<MemSDNode>(User)->getMemoryVT();
      (Subtarget.hasVendorXTHeadMemIdx() || Subtarget.hasVendorXqcisls())) &&
      !((VT == MVT::f32 || VT == MVT::f64) &&
        Subtarget.hasVendorXTHeadFMemIdx()))

  cast<StoreSDNode>(User)->getValue() == Add)

  bool FoundADDI = false;

  for (auto *User : Add->users()) {
    if (!Shift || FoundADDI || User->getOpcode() != ISD::ADD ||
        !isInt<12>(cast<ConstantSDNode>(User->getOperand(1))->getSExtValue()))

    unsigned ShiftAmt = Shift.getConstantOperandVal(1);
    if ((ShiftAmt <= 3 &&
         (Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa())) ||
        (ShiftAmt >= 4 && ShiftAmt <= 7 && Subtarget.hasVendorXqciac()))
                                             unsigned MaxShiftAmount,

  EVT VT = Addr.getSimpleValueType();
  auto SelectShl = [this, VT, MaxShiftAmount](SDValue N, SDValue &Index,
                                              SDValue &Scale) {
    if (N.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(N.getOperand(1)))
      return false;

    unsigned ShiftAmt = N.getConstantOperandVal(1);
    if (ShiftAmt > MaxShiftAmount)
      return false;

    Index = N.getOperand(0);

  if (auto *C1 = dyn_cast<ConstantSDNode>(RHS)) {
        !isa<ConstantSDNode>(LHS.getOperand(1)) &&
        isInt<12>(C1->getSExtValue())) {
      if (SelectShl(LHS.getOperand(1), Index, Scale) &&
          LHS.getOperand(0), C1Val),

      if (SelectShl(LHS.getOperand(0), Index, Scale) &&
          LHS.getOperand(1), C1Val),

  if (SelectShl(RHS, Index, Scale)) {

  if (SelectShl(LHS, Index, Scale)) {
                                                 unsigned MaxShiftAmount,

  if (Index.getOpcode() == ISD::AND) {
    auto *C = dyn_cast<ConstantSDNode>(Index.getOperand(1));
    if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
      Index = Index.getOperand(0);

  if (isa<ConstantSDNode>(Addr.getOperand(1)))
  if (Imm != 0 && Imm % ShiftWidth == 0) {

  if (Imm != 0 && Imm % ShiftWidth == 0) {
    unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;

  if (Imm % ShiftWidth == ShiftWidth - 1) {
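// Hardware shifts read the amount modulo ShiftWidth, so adds, subtracts and
// AND masks on the shift amount can be peeled off when their constant part
// is a multiple of ShiftWidth (or ShiftWidth - 1 for the mask case).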
3390 "Unexpected condition code!");
3397 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
3398 if (CCVal != ExpectedCCVal)
3404 if (!
LHS.getValueType().isScalarInteger())
3415 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
3416 int64_t CVal =
C->getSExtValue();
3419 if (CVal == -2048) {
3422 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
3429 if (isInt<12>(CVal) || CVal == 2048) {
3432 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
3440 RISCV::BINVI,
DL,
N->getValueType(0),
LHS,
3448 if (Subtarget->hasVendorXqcilia() && isInt<26>(CVal) &&
3449 (CVal & 0xFFF) != 0) {
3452 RISCV::QC_E_ADDI,
DL,
N->getValueType(0),
LHS,
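// Equality against a constant is reduced to equality against zero: XORI for
// -2048 (ADDI cannot negate it), otherwise ADDI with -C, after which the
// caller emits seqz/snez.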
      cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
    Val = N.getOperand(0);

  auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
    if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
      return N;

    N.getConstantOperandVal(1) == ShiftAmt &&
  MVT VT = N.getSimpleValueType();

  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
    Val = N.getOperand(0);

  MVT VT = N.getSimpleValueType();
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {

    uint64_t Mask = N.getConstantOperandVal(1);

    unsigned XLen = Subtarget->getXLen();

    Mask &= maskTrailingZeros<uint64_t>(C2);

    Mask &= maskTrailingOnes<uint64_t>(XLen - C2);

    if (Trailing != ShAmt)
      return false;

    if (LeftShift && Leading == 0 && C2 < Trailing)
      Opcode = RISCV::SRLI;
    else if (LeftShift && Leading == 32 - C2 && C2 < Trailing)
      Opcode = RISCV::SRLIW;
    else if (!LeftShift && Leading == C2)
      Opcode = RISCV::SRLI;
    else if (!LeftShift && Leading == 32 + C2)
      Opcode = RISCV::SRLIW;

    EVT VT = N.getValueType();
    ShAmt = LeftShift ? Trailing - C2 : Trailing + C2;
    uint64_t Mask = N.getConstantOperandVal(1);

    unsigned XLen = Subtarget->getXLen();

    if (C2 > Leading && Leading > 0 && Trailing == ShAmt) {

      EVT VT = N.getValueType();

      RISCV::SRLI, DL, VT, Val,
  } else if (bool LeftShift = N.getOpcode() == ISD::SHL;
             (LeftShift || N.getOpcode() == ISD::SRL) &&
             isa<ConstantSDNode>(N.getOperand(1))) {

    unsigned C1 = N.getConstantOperandVal(1);
    unsigned XLen = Subtarget->getXLen();

    if (LeftShift && Leading == 32 && Trailing > 0 &&
        (Trailing + C1) == ShAmt) {

      EVT VT = N.getValueType();

    if (!LeftShift && Leading == 32 && Trailing > C1 &&
        (Trailing - C1) == ShAmt) {

      EVT VT = N.getValueType();
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&

    uint64_t Mask = N.getConstantOperandVal(1);

    Mask &= maskTrailingZeros<uint64_t>(C2);

    if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {

      EVT VT = N.getValueType();

  assert(N->getOpcode() == ISD::OR || N->getOpcode() == RISCVISD::OR_VL);
  if (N->getFlags().hasDisjoint())
    return true;
bool RISCVDAGToDAGISel::selectImm64IfCheaper(int64_t Imm, int64_t OrigImm,

  if (OrigCost <= Cost)
    return false;

  if (!isa<ConstantSDNode>(N))
    return false;
  int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
  if ((Imm >> 31) != 1)
    return false;

  for (const SDNode *U : N->users()) {
    switch (U->getOpcode()) {

  return selectImm64IfCheaper(0xffffffff00000000 | Imm, Imm, N, Val);
  if (!isa<ConstantSDNode>(N))
    return false;
  int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();

  for (const SDNode *U : N->users()) {
    switch (U->getOpcode()) {
    case RISCVISD::VMV_V_X_VL:
      return V->getOpcode() == ISD::ADD ||
             V->getOpcode() == RISCVISD::ADD_VL;

  return selectImm64IfCheaper(-Imm, Imm, N, Val);
  if (!isa<ConstantSDNode>(N))
    return false;
  int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();

  if (isInt<32>(Imm) && ((Imm & 0xfff) != 0xfff || Imm == -1))
    return false;

  for (const SDNode *U : N->users()) {
    switch (U->getOpcode()) {

      if (!(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()))
        return false;

    case RISCVISD::VMV_V_X_VL:
      if (!Subtarget->hasStdExtZvkb())
        return false;

      return V->getOpcode() == ISD::AND ||
             V->getOpcode() == RISCVISD::AND_VL;

  if (isInt<32>(Imm)) {

  return selectImm64IfCheaper(~Imm, Imm, N, Val);
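// ANDN/ORN/XNOR (Zbb/Zbkb) and vector VANDN (Zvkb) invert one source for
// free, so the constant is kept inverted when ~Imm is cheaper to build.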
  bool HasChainOp =
      User->getOperand(ChainOpIdx).getValueType() == MVT::Other;

  const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);

  if (UserOpNo == VLIdx)
    return false;

  auto NumDemandedBits =
      RISCV::getVectorLowDemandedScalarBits(MCOpcode, Log2SEW);
  return NumDemandedBits && Bits >= *NumDemandedBits;
                                        const unsigned Depth) const {

  isa<ConstantSDNode>(Node) || Depth != 0) &&
      "Unexpected opcode");
  if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
    return false;

  if (!User->isMachineOpcode())
    return false;

  switch (User->getMachineOpcode()) {

  case RISCV::SLLI_UW:
  case RISCV::FMV_W_X:
  case RISCV::FCVT_H_W:
  case RISCV::FCVT_H_W_INX:
  case RISCV::FCVT_H_WU:
  case RISCV::FCVT_H_WU_INX:
  case RISCV::FCVT_S_W:
  case RISCV::FCVT_S_W_INX:
  case RISCV::FCVT_S_WU:
  case RISCV::FCVT_S_WU_INX:
  case RISCV::FCVT_D_W:
  case RISCV::FCVT_D_W_INX:
  case RISCV::FCVT_D_WU:
  case RISCV::FCVT_D_WU_INX:
  case RISCV::TH_REVW:
  case RISCV::TH_SRRIW:

  if (Bits >= Subtarget->getXLen() - User->getConstantOperandVal(1))
    break;

  if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
    break;

  unsigned ShAmt = User->getConstantOperandVal(1);
  case RISCV::FMV_H_X:
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

  if (Bits >= (Subtarget->getXLen() / 2))
    break;

  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
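// The *_UW address forms zero-extend the low 32 bits of their index operand,
// so a user in this group only demands 32 bits of the value.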
  auto *C = dyn_cast<ConstantSDNode>(N);

  int64_t Offset = C->getSExtValue();
  for (unsigned Shift = 0; Shift < 4; Shift++) {
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0)) {
      EVT VT = N->getValueType(0);
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue())) {
    VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
  } else if (C && C->isAllOnes()) {
    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
                                   N->getValueType(0));
  } else if (isa<RegisterSDNode>(N) &&
             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
                                   N->getValueType(0));
  if (!N.getOperand(0).isUndef())
    return SDValue();
  N = N.getOperand(1);

  if ((Splat.getOpcode() != RISCVISD::VMV_V_X_VL &&
       Splat.getOpcode() != RISCVISD::VMV_S_X_VL) ||
      !Splat.getOperand(0).isUndef())
    return SDValue();

  assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");

  SplatVal = Splat.getOperand(1);
                                  std::function<bool(int64_t)> ValidateImm,
                                  bool Decrement = false) {
  if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
    return false;

  const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
  "Unexpected splat operand type");

  APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);

  if (!ValidateImm(SplatImm))
    return false;

  return selectVSplatImmHelper(N, SplatVal, *CurDAG, *Subtarget,
                               [](int64_t Imm) { return isInt<5>(Imm); });

  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; },

  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; },

  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      },

  return selectVSplatImmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
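// The "Plus1" predicates accept 16 and reject -16 because the splat value is
// decremented before encoding (e.g. rewriting vmsle as vmslt with imm - 1),
// and the decremented value must still fit simm5.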
  auto IsExtOrTrunc = [](SDValue N) {
    switch (N->getOpcode()) {
    case RISCVISD::TRUNCATE_VECTOR_VL:
    case RISCVISD::VSEXT_VL:
    case RISCVISD::VZEXT_VL:
      return true;
    default:
      return false;
    }
  };

  while (IsExtOrTrunc(N)) {
    if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
      return false;
    N = N->getOperand(0);
  }

      N.getOperand(0).getValueType() == Subtarget->getXLenVT()) {
    Imm = N.getOperand(0);

  if (N.getOpcode() == RISCVISD::FMV_H_X ||
      N.getOpcode() == RISCVISD::FMV_W_X_RV64) {
    Imm = N.getOperand(0);
  if (VT == MVT::f64 && !Subtarget->is64Bit()) {

  if (auto *C = dyn_cast<ConstantSDNode>(N)) {

    if (!isInt<5>(ImmVal))
      return false;
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {

  if (N->getMachineOpcode() != RISCV::ADDIW ||

  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
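// An addiw rd, rs, 0 (sext.w) is removed by converting its input to the W
// form of the same operation, which already sign-extends from bit 31.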
      !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))

  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };

  if (!V.isMachineOpcode())
    return false;
  if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
    for (unsigned I = 1; I < V.getNumOperands(); I += 2)

  return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());

  unsigned MaskOpIdx = I->MaskOpIdx;

  const unsigned Opc = I->UnmaskedPseudo;

  "Unmasked pseudo has policy but masked pseudo doesn't?");
  "Unexpected pseudo structure");
  assert(!(HasPassthru && !MaskedHasPassthru) &&
         "Unmasked pseudo has passthru but masked pseudo doesn't?");

  bool ShouldSkip = !HasPassthru && MaskedHasPassthru;

  bool HasChainOp =
      N->getOperand(N->getNumOperands() - 1).getValueType() == MVT::Other;
  unsigned LastOpNum = N->getNumOperands() - 1 - HasChainOp;
  for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) {

    if (DropPolicy && I == LastOpNum)
      continue;

  if (!N->memoperands_empty())

  Result->setFlags(N->getFlags());
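// When the mask is known all-ones, the _MASK pseudo is replaced by its
// unmasked twin: the mask operand is dropped and the passthru/policy
// operands are re-packed to match the unmasked operand layout.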
bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
  bool MadeChange = false;

  if (N->use_empty() || !N->isMachineOpcode())
    continue;

  const unsigned Opc = N->getMachineOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||

  for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {

  Result->setFlags(N->getFlags());
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
mir Rename Register Operands
Register const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static bool usesAllOnesMask(SDValue MaskOp)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
static bool isRegRegScaleLoadOrStore(SDNode *User, SDValue Add, const RISCVSubtarget &Subtarget)
Return true if this a load/store that we have a RegRegScale instruction for.
#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)
static bool isWorthFoldingAdd(SDValue Add)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static bool isImplicitDef(SDValue V)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm, bool Decrement=false)
static unsigned getSegInstNF(unsigned Intrinsic)
static bool isWorthFoldingIntoRegRegScale(const RISCVSubtarget &Subtarget, SDValue Add, SDValue Shift=SDValue())
Is it profitable to fold this Add into RegRegScale load/store.
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false)
#define INST_ALL_NF_CASE_WITH_FF(NAME)
#define CASE_VMSLT_OPCODES(lmulenum, suffix)
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
static SDValue findVSplat(SDValue N)
#define INST_ALL_NF_CASE(NAME)
Contains matchers for matching SelectionDAG nodes and values.
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isShiftedMask() const
Return true if this APInt value contains a non-empty sequence of ones with the remainder zero.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
const SDValue & getValue() const
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine, CodeGenOptLevel OptLevel)
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectNegImm(SDValue N, SDValue &Val)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
bool trySignedBitfieldInsertInMask(SDNode *Node)
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset should be all zeros.
bool selectZExtImm32(SDValue N, SDValue &Val)
bool SelectAddrRegZextRegScale(SDValue Addr, unsigned MaxShiftAmount, unsigned Bits, SDValue &Base, SDValue &Index, SDValue &Scale)
bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
void selectVLSEGFF(SDNode *Node, unsigned NF, bool IsMasked)
bool selectVSplatSimm5Plus1NoDec(SDValue N, SDValue &SplatVal)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
void selectSF_VC_X_SE(SDNode *Node)
bool orDisjoint(const SDNode *Node) const
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT, SDValue X, unsigned Msb, unsigned Lsb)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
bool selectInvLogicImm(SDValue N, SDValue &Val)
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool trySignedBitfieldInsertInSign(SDNode *Node)
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
bool SelectAddrRegImm9(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the offset is restricted to uimm9.
bool selectScalarFPAsInt(SDValue N, SDValue &Imm)
bool hasAllBUsers(SDNode *Node) const
void selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool selectVLOp(SDValue N, SDValue &VL)
bool tryBitfieldInsertOpFromXor(SDNode *Node)
bool trySignedBitfieldExtract(SDNode *Node)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
void selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
bool selectVSplatImm64Neg(SDValue N, SDValue &SplatVal)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
bool tryBitfieldInsertOpFromOrAndImm(SDNode *Node)
bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT, SDValue X, unsigned Msb, unsigned Lsb)
void selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
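All of the SelectAddr* members above follow the ComplexPattern convention: return true and fill the out-parameters on a match, return false to let other patterns try. A sketch consistent with the declared SelectAddrFrameIndex signature; the actual body in this file may differ in detail:

bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
                                             SDValue &Offset) {
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    MVT XLenVT = Subtarget->getXLenVT();
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), XLenVT);
    Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), XLenVT);
    return true; // matched: frame index plus a zero immediate offset
  }
  return false;  // no match; selection falls back to other patterns
}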
Quantity expandVScale(Quantity X) const
If the ElementCount or TypeSize X is scalable and VScale (VLEN) is exactly known, returns X converted to a fixed quantity. Otherwise returns X unmodified.
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
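getLMUL and getRegClassIDForVecVT are pure functions of the type; with RVVBitsPerBlock = 64 the mapping is mechanical. A fragment sketch (the values in the comments follow from that block size):

// <vscale x 4 x i32> holds 4 x 32 = 128 bits per vscale unit, i.e. two
// 64-bit RVV blocks, so it maps to LMUL=2 and the VRM2 register class.
RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(MVT::nxv4i32);
unsigned RCID = RISCVTargetLowering::getRegClassIDForVecVT(MVT::nxv4i32);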
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
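A typical use of these accessors, hedged against the assertion inside getConstantOperandVal, which requires the operand to be a ConstantSDNode; getConstantShiftAmount is a hypothetical helper:

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Extract a constant shift amount from an ISD::SHL node, if there is one.
static bool getConstantShiftAmount(SDNode *N, uint64_t &Amt) {
  if (N->getOpcode() != ISD::SHL)
    return false;
  // Operand 1 of a shift is the amount; only read it when it is a constant.
  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return false;
  Amt = N->getConstantOperandVal(1);
  return true;
}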
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during instruction selection that starts at Root.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction selection that starts at Root.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
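getMachineNode plus ReplaceNode is the usual tail of a Select() override: build the post-isel node, then splice it in place of the target-independent one. A sketch; selectExample is a hypothetical member and the ADDI shape is illustrative:

void RISCVDAGToDAGISel::selectExample(SDNode *Node) {
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);
  // Materialize "operand 0 + 0" as a concrete ADDI machine node.
  SDValue Zero = CurDAG->getTargetConstant(0, DL, VT);
  MachineSDNode *New =
      CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Node->getOperand(0), Zero);
  // Every use of Node is rewritten to New and Node is removed from the DAG.
  ReplaceNode(Node, New);
}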
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
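CreateStackTemporary, getStore, and getEntryNode combine to round-trip a value through the stack. A hedged sketch; spillToStackSlot and the Align(8) choice are illustrative:

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Spill Val into a fresh stack slot and return the store's output chain.
static SDValue spillToStackSlot(SelectionDAG &DAG, SDValue Val,
                                const SDLoc &DL) {
  EVT VT = Val.getValueType();
  SDValue Slot = DAG.CreateStackTemporary(VT.getStoreSize(), Align(8));
  int FI = cast<FrameIndexSDNode>(Slot.getNode())->getIndex();
  auto MPI = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
  return DAG.getStore(DAG.getEntryNode(), DL, Val, Slot, MPI, Align(8));
}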
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that is guaranteed to have the same semantics as an ADD.
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
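computeKnownBits, MaskedValueIsZero, and ComputeNumSignBits answer the questions peepholes most often ask of a value. A sketch; fitsInNUnsignedBits is a hypothetical helper over scalar integer values:

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Can Op be treated as an unsigned N-bit value?
static bool fitsInNUnsignedBits(SelectionDAG &DAG, SDValue Op, unsigned N) {
  unsigned BitWidth = Op.getValueSizeInBits();
  if (N >= BitWidth)
    return true;
  // True exactly when every bit above the low N is known to be zero.
  APInt HighBits = APInt::getHighBitsSet(BitWidth, BitWidth - N);
  return DAG.MaskedValueIsZero(Op, HighBits);
}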
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
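getTargetExtractSubreg and getTargetInsertSubreg wrap the EXTRACT_SUBREG/INSERT_SUBREG machine nodes; for RVV register groups the indices are the sub_vrm* family. A fragment sketch, assuming DL, CurDAG, and WideVec are in scope inside a selector; the types are illustrative:

// Pull the low LMUL=1 register out of an LMUL=2 group:
// <vscale x 4 x i32> (VRM2) -> <vscale x 2 x i32> (VR).
SDValue Lo = CurDAG->getTargetExtractSubreg(RISCV::sub_vrm1_0, DL,
                                            MVT::nxv2i32, WideVec);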
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
ilist< SDNode >::iterator allnodes_iterator
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
iterator_range< user_iterator > users()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added / subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ SIGN_EXTEND
Conversion operators.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register into the rest of the register).
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with integer operands.
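A SETCC node keeps its predicate as a CondCodeSDNode in operand 2, which is how a query like this is normally phrased. A standalone sketch; isEqualityCompare is a hypothetical helper:

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Is N an integer equality/inequality compare (SETEQ or SETNE)?
static bool isEqualityCompare(const SDNode *N) {
  if (N->getOpcode() != ISD::SETCC)
    return false;
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  return ISD::isIntEqualitySetCC(CC);
}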
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
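These are the IR-level PatternMatch combinators (SelectionDAG code uses the parallel SDPatternMatch facility with the same shape). A small sketch of how m_Value and m_Deferred thread a capture through one match() expression; isXorOfSelf is a hypothetical helper:

#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Match `xor X, X` for any X: m_Deferred requires the second operand to
// equal whatever m_Value captured first.
static bool isXorOfSelf(Value *V) {
  Value *X;
  return match(V, m_Xor(m_Value(X), m_Deferred(X)));
}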
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost, bool FreeZeroes)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
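generateInstSeq returns the LUI/ADDI(W)/SLLI/... recipe for materializing a constant, so its length doubles as a cost measure. A hedged sketch; matCost is a hypothetical helper and the include path is the in-tree target location:

#include "MCTargetDesc/RISCVMatInt.h"

using namespace llvm;

// How many instructions does materializing Val take on this subtarget?
static unsigned matCost(int64_t Val, const MCSubtargetInfo &STI) {
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
  return Seq.size(); // e.g. 1 for simm12, 2 for LUI+ADDI(W), ...
}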
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
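The vtype helpers round-trip between the (LMUL, SEW, ta, ma) tuple and its CSR encoding, and getSEWLMULRatio is what decides whether two vsetvli settings keep VLMAX unchanged. A fragment sketch; the enum spellings assume the current in-tree RISCVVType namespace:

using namespace llvm;

// Encode vtype for SEW=32, LMUL=2, tail-agnostic, mask-agnostic.
unsigned VType = RISCVVType::encodeVTYPE(RISCVVType::LMUL_2, /*SEW=*/32,
                                         /*TailAgnostic=*/true,
                                         /*MaskAgnostic=*/true);
// decodeVLMUL returns {multiplier, isFractional}: LMUL_2 -> {2, false}.
auto [Mul, Fractional] = RISCVVType::decodeVLMUL(RISCVVType::LMUL_2);
// SEW/LMUL = 32/2 = 16; settings with equal ratios preserve VLMAX.
unsigned Ratio = RISCVVType::getSEWLMULRatio(/*SEW=*/32, RISCVVType::LMUL_2);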
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version.)
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version.)
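These bit predicates are typically chained to decompose a mask constant into a position and a width. A worked sketch with the arithmetic spelled out:

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

static void decomposeMaskExample() {
  // 0x000000FFFF000000: a run of 16 ones starting at bit 24.
  uint64_t V = 0x000000FFFF000000ULL;
  bool IsShifted = isShiftedMask_64(V);  // true: one contiguous run of ones
  unsigned Lsb = countr_zero(V);         // 24: trailing zeros below the run
  unsigned Width = countr_one(V >> Lsb); // 16: length of the run itself
  (void)IsShifted; (void)Lsb; (void)Width;
}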
CodeGenOptLevel
Code generation optimization level.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
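commonAlignment is how an access at an offset inherits alignment from its base. A one-line example:

#include "llvm/Support/Alignment.h"

// A 16-byte-aligned base plus an offset of 8 is only 8-byte aligned:
// commonAlignment(Align(16), 8) == Align(8).
llvm::Align A = llvm::commonAlignment(llvm::Align(16), /*Offset=*/8);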
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
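getFixedStack plus getWithOffset is the standard way to describe both halves of a stack slot to the scheduler and alias analysis. A sketch; slotWords is a hypothetical helper and FI is assumed to be a valid frame index:

#include "llvm/CodeGen/MachineMemOperand.h"
#include <utility>

using namespace llvm;

// Pointer info for word 0 and word 1 (byte offset 4) of a stack slot.
static std::pair<MachinePointerInfo, MachinePointerInfo>
slotWords(MachineFunction &MF, int FI) {
  MachinePointerInfo LoMPI = MachinePointerInfo::getFixedStack(MF, FI);
  MachinePointerInfo HiMPI = LoMPI.getWithOffset(4); // offset is in bytes
  return {LoMPI, HiMPI};
}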
This represents a list of ValueType's that has been intern'd by a SelectionDAG.