20#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
29#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
32 "riscv-use-rematerializable-movimm",
cl::Hidden,
33 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
34 "constant materialization"),
38#define GET_RISCVVSSEGTable_IMPL
39#define GET_RISCVVLSEGTable_IMPL
40#define GET_RISCVVLXSEGTable_IMPL
41#define GET_RISCVVSXSEGTable_IMPL
42#define GET_RISCVVLETable_IMPL
43#define GET_RISCVVSETable_IMPL
44#define GET_RISCVVLXTable_IMPL
45#define GET_RISCVVSXTable_IMPL
46#include "RISCVGenSearchableTables.inc"
52 bool MadeChange =
false;
59 switch (
N->getOpcode()) {
63 MVT VT =
N->getSimpleValueType(0);
79 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands");
80 MVT VT =
N->getSimpleValueType(0);
86 Lo.getValueType() == MVT::i32 &&
Hi.getValueType() == MVT::i32 &&
94 int FI = cast<FrameIndexSDNode>(StackSlot.
getNode())->getIndex();
118 MVT::i64, MPI,
Align(8),
125 MVT VT =
N->getSimpleValueType(0);
138 LLVM_DEBUG(
dbgs() <<
"RISC-V DAG preprocessing replacing:\nOld: ");
157 bool MadeChange =
false;
161 if (
N->use_empty() || !
N->isMachineOpcode())
164 MadeChange |= doPeepholeSExtW(
N);
169 MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(
N));
174 MadeChange |= doPeepholeMergeVVMFold();
182 MadeChange |= doPeepholeNoRegPassThru();
194 switch (Inst.getOpndKind()) {
233 if (Seq.
size() > 3) {
234 unsigned ShiftAmt, AddOpc;
253 SDNode *Node,
unsigned Log2SEW,
const SDLoc &
DL,
unsigned CurOp,
255 bool IsLoad,
MVT *IndexVT) {
256 SDValue Chain = Node->getOperand(0);
258 Operands.push_back(Node->getOperand(CurOp++));
260 if (IsStridedOrIndexed) {
261 Operands.push_back(Node->getOperand(CurOp++));
263 *IndexVT =
Operands.back()->getSimpleValueType(0);
267 SDValue Mask = Node->getOperand(CurOp++);
284 Policy = Node->getConstantOperandVal(CurOp++);
295 MVT VT = Node->getSimpleValueType(0);
296 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
302 Operands.push_back(Node->getOperand(CurOp++));
308 RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided,
false, Log2SEW,
309 static_cast<unsigned>(LMUL));
313 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
324 MVT VT = Node->getSimpleValueType(0);
326 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
332 Operands.push_back(Node->getOperand(CurOp++));
339 RISCV::getVLSEGPseudo(NF, IsMasked,
false,
true,
340 Log2SEW,
static_cast<unsigned>(LMUL));
344 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
356 MVT VT = Node->getSimpleValueType(0);
357 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
363 Operands.push_back(Node->getOperand(CurOp++));
374 if (DecodedLMUL.second)
375 ContainedTyNumElts /= DecodedLMUL.first;
377 ContainedTyNumElts *= DecodedLMUL.first;
379 "Element count mismatch");
384 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
386 "values when XLEN=32");
389 NF, IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
390 static_cast<unsigned>(IndexLMUL));
394 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
405 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
406 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
412 Operands.push_back(Node->getOperand(CurOp++));
418 NF, IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
422 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
431 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
432 unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
438 Operands.push_back(Node->getOperand(CurOp++));
449 if (DecodedLMUL.second)
450 ContainedTyNumElts /= DecodedLMUL.first;
452 ContainedTyNumElts *= DecodedLMUL.first;
454 "Element count mismatch");
459 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
461 "values when XLEN=32");
464 NF, IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
465 static_cast<unsigned>(IndexLMUL));
469 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
484 unsigned IntNo = Node->getConstantOperandVal(0);
486 assert((IntNo == Intrinsic::riscv_vsetvli ||
487 IntNo == Intrinsic::riscv_vsetvlimax) &&
488 "Unexpected vsetvli intrinsic");
490 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
491 unsigned Offset = (VLMax ? 1 : 2);
494 "Unexpected number of operands");
499 Node->getConstantOperandVal(
Offset + 1) & 0x7);
506 unsigned Opcode = RISCV::PseudoVSETVLI;
507 if (
auto *
C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
514 Opcode = RISCV::PseudoVSETVLIX0;
516 VLOperand = Node->getOperand(1);
518 if (
auto *
C = dyn_cast<ConstantSDNode>(VLOperand)) {
520 if (isUInt<5>(AVL)) {
523 XLenVT, VLImm, VTypeIOp));
534 MVT VT = Node->getSimpleValueType(0);
535 unsigned Opcode = Node->getOpcode();
537 "Unexpected opcode");
542 SDValue N0 = Node->getOperand(0);
543 SDValue N1 = Node->getOperand(1);
560 bool SignExt =
false;
578 uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
579 if (Opcode !=
ISD::AND && (Val & RemovedBitsMask) != 0)
582 int64_t ShiftedVal = Val >> ShAmt;
583 if (!isInt<12>(ShiftedVal))
587 if (SignExt && ShAmt >= 32)
594 case ISD::AND: BinOpc = RISCV::ANDI;
break;
595 case ISD::OR: BinOpc = RISCV::ORI;
break;
596 case ISD::XOR: BinOpc = RISCV::XORI;
break;
599 unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
613 if (!Subtarget->hasVendorXTHeadBb())
616 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
620 SDValue N0 = Node->getOperand(0);
624 auto BitfieldExtract = [&](
SDValue N0,
unsigned Msb,
unsigned Lsb,
SDLoc DL,
632 MVT VT = Node->getSimpleValueType(0);
633 const unsigned RightShAmt = N1C->getZExtValue();
638 auto *N01C = dyn_cast<ConstantSDNode>(N0->
getOperand(1));
642 const unsigned LeftShAmt = N01C->getZExtValue();
645 if (LeftShAmt > RightShAmt)
649 const unsigned Msb = MsbPlusOne - 1;
650 const unsigned Lsb = RightShAmt - LeftShAmt;
652 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb,
DL, VT);
661 cast<VTSDNode>(N0.
getOperand(1))->getVT().getSizeInBits();
667 const unsigned Msb = ExtSize - 1;
668 const unsigned Lsb = RightShAmt;
670 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb,
DL, VT);
680 if (!Subtarget->hasVendorXTHeadMemIdx())
694 "Unexpected addressing mode");
697 int64_t
Offset =
C->getSExtValue();
702 for (Shift = 0; Shift < 4; Shift++)
703 if (isInt<5>(
Offset >> Shift) && ((
Offset % (1LL << Shift)) == 0))
712 if (LoadVT == MVT::i8 && IsPre)
713 Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
714 else if (LoadVT == MVT::i8 && IsPost)
715 Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
716 else if (LoadVT == MVT::i16 && IsPre)
717 Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
718 else if (LoadVT == MVT::i16 && IsPost)
719 Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
720 else if (LoadVT == MVT::i32 && IsPre)
721 Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
722 else if (LoadVT == MVT::i32 && IsPost)
723 Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
724 else if (LoadVT == MVT::i64 && IsPre)
725 Opcode = RISCV::TH_LDIB;
726 else if (LoadVT == MVT::i64 && IsPost)
727 Opcode = RISCV::TH_LDIA;
754 unsigned IntNo = Node->getConstantOperandVal(1);
756 assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
757 IntNo == Intrinsic::riscv_sf_vc_i_se) &&
758 "Unexpected vsetvli intrinsic");
761 unsigned Log2SEW =
Log2_32(Node->getConstantOperandVal(6));
765 Node->getOperand(4), Node->getOperand(5),
766 Node->getOperand(8), SEWOp,
767 Node->getOperand(0)};
770 auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
771 switch (LMulSDNode->getSExtValue()) {
773 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
774 : RISCV::PseudoVC_I_SE_MF8;
777 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
778 : RISCV::PseudoVC_I_SE_MF4;
781 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
782 : RISCV::PseudoVC_I_SE_MF2;
785 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
786 : RISCV::PseudoVC_I_SE_M1;
789 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
790 : RISCV::PseudoVC_I_SE_M2;
793 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
794 : RISCV::PseudoVC_I_SE_M4;
797 Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
798 : RISCV::PseudoVC_I_SE_M8;
803 Opcode,
DL, Node->getSimpleValueType(0),
Operands));
807#define INST_NF_CASE(NAME, NF) \
808 case Intrinsic::riscv_##NAME##NF: \
810#define INST_NF_CASE_MASK(NAME, NF) \
811 case Intrinsic::riscv_##NAME##NF##_mask: \
813#define INST_NF_CASE_FF(NAME, NF) \
814 case Intrinsic::riscv_##NAME##NF##ff: \
816#define INST_NF_CASE_FF_MASK(NAME, NF) \
817 case Intrinsic::riscv_##NAME##NF##ff_mask: \
819#define INST_ALL_NF_CASE_BASE(MACRO_NAME, NAME) \
820 MACRO_NAME(NAME, 2) \
821 MACRO_NAME(NAME, 3) \
822 MACRO_NAME(NAME, 4) \
823 MACRO_NAME(NAME, 5) \
824 MACRO_NAME(NAME, 6) \
825 MACRO_NAME(NAME, 7) \
827#define INST_ALL_NF_CASE(NAME) \
828 INST_ALL_NF_CASE_BASE(INST_NF_CASE, NAME) \
829 INST_ALL_NF_CASE_BASE(INST_NF_CASE_MASK, NAME)
830#define INST_ALL_NF_CASE_WITH_FF(NAME) \
831 INST_ALL_NF_CASE(NAME) \
832 INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF, NAME) \
833 INST_ALL_NF_CASE_BASE(INST_NF_CASE_FF_MASK, NAME)
850 if (Node->isMachineOpcode()) {
858 unsigned Opcode = Node->getOpcode();
861 MVT VT = Node->getSimpleValueType(0);
863 bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
867 assert((VT == Subtarget->
getXLenVT() || VT == MVT::i32) &&
"Unexpected VT");
868 auto *ConstNode = cast<ConstantSDNode>(Node);
869 if (ConstNode->isZero()) {
875 int64_t Imm = ConstNode->getSExtValue();
879 if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) &&
hasAllBUsers(Node))
880 Imm = SignExtend64<8>(Imm);
883 if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
885 Imm = SignExtend64<16>(Imm);
888 if (!isInt<32>(Imm) && isUInt<32>(Imm) &&
hasAllWUsers(Node))
889 Imm = SignExtend64<32>(Imm);
895 const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
897 bool NegZeroF64 = APF.
isNegZero() && VT == MVT::f64;
907 bool HasZdinx = Subtarget->hasStdExtZdinx();
908 bool Is64Bit = Subtarget->
is64Bit();
914 assert(Subtarget->hasStdExtZfbfmin());
915 Opc = RISCV::FMV_H_X;
918 Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
921 Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
928 Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
930 Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
935 if (VT.
SimpleTy == MVT::f16 && Opc == RISCV::COPY) {
938 }
else if (VT.
SimpleTy == MVT::f32 && Opc == RISCV::COPY) {
941 }
else if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
950 Opc = RISCV::FSGNJN_D;
952 Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
966 "BuildPairF64 only handled here on rv32i_zdinx");
983 "SplitF64 only handled here on rv32i_zdinx");
985 if (!
SDValue(Node, 0).use_empty()) {
987 Node->getValueType(0),
988 Node->getOperand(0));
992 if (!
SDValue(Node, 1).use_empty()) {
994 RISCV::sub_gpr_odd,
DL, Node->getValueType(1), Node->getOperand(0));
1003 "SplitGPRPair should already be handled");
1005 if (!Subtarget->hasStdExtZfa())
1008 "Unexpected subtarget");
1011 if (!
SDValue(Node, 0).use_empty()) {
1013 Node->getOperand(0));
1016 if (!
SDValue(Node, 1).use_empty()) {
1018 Node->getOperand(0));
1026 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1029 SDValue N0 = Node->getOperand(0);
1033 unsigned ShAmt = N1C->getZExtValue();
1037 unsigned XLen = Subtarget->
getXLen();
1040 if (TrailingZeros > 0 && LeadingZeros == 32) {
1052 if (TrailingZeros == 0 && LeadingZeros > ShAmt &&
1053 XLen - LeadingZeros > 11 && LeadingZeros != 32) {
1075 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1078 SDValue N0 = Node->getOperand(0);
1081 unsigned ShAmt = N1C->getZExtValue();
1087 unsigned XLen = Subtarget->
getXLen();
1090 if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
1109 Mask |= maskTrailingOnes<uint64_t>(ShAmt);
1113 if (ShAmt >= TrailingOnes)
1116 if (TrailingOnes == 32) {
1118 Subtarget->
is64Bit() ? RISCV::SRLIW : RISCV::SRLI,
DL, VT,
1129 if (HasBitTest && ShAmt + 1 == TrailingOnes) {
1131 Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST,
DL, VT,
1137 unsigned LShAmt = Subtarget->
getXLen() - TrailingOnes;
1138 if (Subtarget->hasVendorXTHeadBb()) {
1168 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1171 SDValue N0 = Node->getOperand(0);
1174 unsigned ShAmt = N1C->getZExtValue();
1176 cast<VTSDNode>(N0.
getOperand(1))->getVT().getSizeInBits();
1178 if (ExtSize >= 32 || ShAmt >= ExtSize)
1180 unsigned LShAmt = Subtarget->
getXLen() - ExtSize;
1197 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1201 SDValue N0 = Node->getOperand(0);
1206 if (!Subtarget->hasVendorXTHeadBb())
1218 auto *
C = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
1221 unsigned C2 =
C->getZExtValue();
1222 unsigned XLen = Subtarget->
getXLen();
1223 assert((C2 > 0 && C2 < XLen) &&
"Unexpected shift amount!");
1231 bool IsCANDI = isInt<6>(N1C->getSExtValue());
1237 C1 &= maskTrailingZeros<uint64_t>(C2);
1239 C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
1243 bool OneUseOrZExtW = N0.
hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
1253 if (C2 + 32 == Leading) {
1265 if (C2 >= 32 && (Leading - C2) == 1 && N0.
hasOneUse() &&
1267 cast<VTSDNode>(
X.getOperand(1))->getVT() == MVT::i32) {
1272 RISCV::SRLIW,
DL, VT,
SDValue(SRAIW, 0),
1286 const unsigned Lsb = C2;
1287 if (tryUnsignedBitfieldExtract(Node,
DL, VT,
X, Msb, Lsb))
1292 bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
1294 cast<VTSDNode>(
X.getOperand(1))->getVT() == MVT::i32;
1296 Skip |= HasBitTest && Leading == XLen - 1;
1297 if (OneUseOrZExtW && !Skip) {
1299 RISCV::SLLI,
DL, VT,
X,
1315 if (C2 + Leading < XLen &&
1316 C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
1318 if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
1327 if (OneUseOrZExtW && !IsCANDI) {
1329 RISCV::SLLI,
DL, VT,
X,
1345 if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
1347 unsigned SrliOpc = RISCV::SRLI;
1350 isa<ConstantSDNode>(
X.getOperand(1)) &&
1351 X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
1352 SrliOpc = RISCV::SRLIW;
1353 X =
X.getOperand(0);
1365 if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
1366 OneUseOrZExtW && !IsCANDI) {
1368 RISCV::SRLIW,
DL, VT,
X,
1377 if (Trailing > 0 && Leading + Trailing == 32 && C2 + Trailing < XLen &&
1378 OneUseOrZExtW && Subtarget->hasStdExtZba()) {
1380 RISCV::SRLI,
DL, VT,
X,
1383 RISCV::SLLI_UW,
DL, VT,
SDValue(SRLI, 0),
1395 if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1397 RISCV::SRLI,
DL, VT,
X,
1406 if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1408 RISCV::SRLIW,
DL, VT,
X,
1418 if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
1419 Subtarget->hasStdExtZba()) {
1421 RISCV::SRLI,
DL, VT,
X,
1424 RISCV::SLLI_UW,
DL, VT,
SDValue(SRLI, 0),
1432 const uint64_t C1 = N1C->getZExtValue();
1437 unsigned XLen = Subtarget->
getXLen();
1438 assert((C2 > 0 && C2 < XLen) &&
"Unexpected shift amount!");
1443 bool Skip = C2 > 32 && isInt<12>(N1C->getSExtValue()) &&
1445 isa<ConstantSDNode>(
X.getOperand(1)) &&
1446 X.getConstantOperandVal(1) == 32;
1453 RISCV::SRAI,
DL, VT,
X,
1469 if (C2 > Leading && Leading > 0 && Trailing > 0) {
1490 if (
isMask_64(C1) && !isInt<12>(N1C->getSExtValue())) {
1492 if (tryUnsignedBitfieldExtract(Node,
DL, VT, N0, Msb, 0))
1509 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1510 if (!N1C || !N1C->hasOneUse())
1514 SDValue N0 = Node->getOperand(0);
1531 (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
1533 IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
1534 if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.
hasOneUse()))
1538 bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
1540 IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
1541 if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.
hasOneUse()))
1547 unsigned XLen = Subtarget->
getXLen();
1553 unsigned ConstantShift = XLen - LeadingZeros;
1557 uint64_t ShiftedC1 = C1 << ConstantShift;
1560 ShiftedC1 = SignExtend64<32>(ShiftedC1);
1576 if (Subtarget->hasVendorXCVmem() && !Subtarget->
is64Bit()) {
1582 SDValue Chain = Node->getOperand(0);
1586 bool Simm12 =
false;
1587 bool SignExtend = Load->getExtensionType() ==
ISD::SEXTLOAD;
1589 if (
auto ConstantOffset = dyn_cast<ConstantSDNode>(
Offset)) {
1590 int ConstantVal = ConstantOffset->getSExtValue();
1591 Simm12 = isInt<12>(ConstantVal);
1597 unsigned Opcode = 0;
1598 switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
1600 if (Simm12 && SignExtend)
1601 Opcode = RISCV::CV_LB_ri_inc;
1602 else if (Simm12 && !SignExtend)
1603 Opcode = RISCV::CV_LBU_ri_inc;
1604 else if (!Simm12 && SignExtend)
1605 Opcode = RISCV::CV_LB_rr_inc;
1607 Opcode = RISCV::CV_LBU_rr_inc;
1610 if (Simm12 && SignExtend)
1611 Opcode = RISCV::CV_LH_ri_inc;
1612 else if (Simm12 && !SignExtend)
1613 Opcode = RISCV::CV_LHU_ri_inc;
1614 else if (!Simm12 && SignExtend)
1615 Opcode = RISCV::CV_LH_rr_inc;
1617 Opcode = RISCV::CV_LHU_rr_inc;
1621 Opcode = RISCV::CV_LW_ri_inc;
1623 Opcode = RISCV::CV_LW_rr_inc;
1639 unsigned IntNo = Node->getConstantOperandVal(0);
1644 case Intrinsic::riscv_vmsgeu:
1645 case Intrinsic::riscv_vmsge: {
1646 SDValue Src1 = Node->getOperand(1);
1647 SDValue Src2 = Node->getOperand(2);
1648 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
1649 bool IsCmpConstant =
false;
1650 bool IsCmpMinimum =
false;
1657 if (
auto *
C = dyn_cast<ConstantSDNode>(Src2)) {
1658 IsCmpConstant =
true;
1659 CVal =
C->getSExtValue();
1660 if (CVal >= -15 && CVal <= 16) {
1661 if (!IsUnsigned || CVal != 0)
1663 IsCmpMinimum =
true;
1667 IsCmpMinimum =
true;
1670 unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode, VMSGTOpcode;
1674#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
1675 case RISCVII::VLMUL::lmulenum: \
1676 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1677 : RISCV::PseudoVMSLT_VX_##suffix; \
1678 VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
1679 : RISCV::PseudoVMSGT_VX_##suffix; \
1688#undef CASE_VMSLT_OPCODES
1694#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix) \
1695 case RISCVII::VLMUL::lmulenum: \
1696 VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
1697 VMSetOpcode = RISCV::PseudoVMSET_M_##suffix; \
1706#undef CASE_VMNAND_VMSET_OPCODES
1721 if (IsCmpConstant) {
1726 {Src1, Imm, VL, SEW}));
1736 {Cmp, Cmp, VL, MaskSEW}));
1739 case Intrinsic::riscv_vmsgeu_mask:
1740 case Intrinsic::riscv_vmsge_mask: {
1741 SDValue Src1 = Node->getOperand(2);
1742 SDValue Src2 = Node->getOperand(3);
1743 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1744 bool IsCmpConstant =
false;
1745 bool IsCmpMinimum =
false;
1752 if (
auto *
C = dyn_cast<ConstantSDNode>(Src2)) {
1753 IsCmpConstant =
true;
1754 CVal =
C->getSExtValue();
1755 if (CVal >= -15 && CVal <= 16) {
1756 if (!IsUnsigned || CVal != 0)
1758 IsCmpMinimum =
true;
1762 IsCmpMinimum =
true;
1765 unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1766 VMOROpcode, VMSGTMaskOpcode;
1770#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
1771 case RISCVII::VLMUL::lmulenum: \
1772 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1773 : RISCV::PseudoVMSLT_VX_##suffix; \
1774 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
1775 : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
1776 VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK \
1777 : RISCV::PseudoVMSGT_VX_##suffix##_MASK; \
1786#undef CASE_VMSLT_OPCODES
1792#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
1793 case RISCVII::VLMUL::lmulenum: \
1794 VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
1795 VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
1796 VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
1805#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
1812 SDValue MaskedOff = Node->getOperand(1);
1813 SDValue Mask = Node->getOperand(4);
1819 if (Mask == MaskedOff) {
1825 {Mask, MaskedOff, VL, MaskSEW}));
1832 if (Mask == MaskedOff) {
1837 {Mask, Cmp, VL, MaskSEW}));
1841 if (IsCmpConstant) {
1846 VMSGTMaskOpcode,
DL, VT,
1847 {MaskedOff, Src1, Imm, Mask, VL, SEW}));
1858 {MaskedOff, Src1, Src2, Mask, VL, SEW}),
1862 {Cmp, Mask, VL, MaskSEW}));
1865 case Intrinsic::riscv_vsetvli:
1866 case Intrinsic::riscv_vsetvlimax:
1872 unsigned IntNo = Node->getConstantOperandVal(1);
1877 case Intrinsic::riscv_vlseg2:
1878 case Intrinsic::riscv_vlseg3:
1879 case Intrinsic::riscv_vlseg4:
1880 case Intrinsic::riscv_vlseg5:
1881 case Intrinsic::riscv_vlseg6:
1882 case Intrinsic::riscv_vlseg7:
1883 case Intrinsic::riscv_vlseg8: {
1888 case Intrinsic::riscv_vlseg2_mask:
1889 case Intrinsic::riscv_vlseg3_mask:
1890 case Intrinsic::riscv_vlseg4_mask:
1891 case Intrinsic::riscv_vlseg5_mask:
1892 case Intrinsic::riscv_vlseg6_mask:
1893 case Intrinsic::riscv_vlseg7_mask:
1894 case Intrinsic::riscv_vlseg8_mask: {
1899 case Intrinsic::riscv_vlsseg2:
1900 case Intrinsic::riscv_vlsseg3:
1901 case Intrinsic::riscv_vlsseg4:
1902 case Intrinsic::riscv_vlsseg5:
1903 case Intrinsic::riscv_vlsseg6:
1904 case Intrinsic::riscv_vlsseg7:
1905 case Intrinsic::riscv_vlsseg8: {
1910 case Intrinsic::riscv_vlsseg2_mask:
1911 case Intrinsic::riscv_vlsseg3_mask:
1912 case Intrinsic::riscv_vlsseg4_mask:
1913 case Intrinsic::riscv_vlsseg5_mask:
1914 case Intrinsic::riscv_vlsseg6_mask:
1915 case Intrinsic::riscv_vlsseg7_mask:
1916 case Intrinsic::riscv_vlsseg8_mask: {
1921 case Intrinsic::riscv_vloxseg2:
1922 case Intrinsic::riscv_vloxseg3:
1923 case Intrinsic::riscv_vloxseg4:
1924 case Intrinsic::riscv_vloxseg5:
1925 case Intrinsic::riscv_vloxseg6:
1926 case Intrinsic::riscv_vloxseg7:
1927 case Intrinsic::riscv_vloxseg8:
1931 case Intrinsic::riscv_vluxseg2:
1932 case Intrinsic::riscv_vluxseg3:
1933 case Intrinsic::riscv_vluxseg4:
1934 case Intrinsic::riscv_vluxseg5:
1935 case Intrinsic::riscv_vluxseg6:
1936 case Intrinsic::riscv_vluxseg7:
1937 case Intrinsic::riscv_vluxseg8:
1941 case Intrinsic::riscv_vloxseg2_mask:
1942 case Intrinsic::riscv_vloxseg3_mask:
1943 case Intrinsic::riscv_vloxseg4_mask:
1944 case Intrinsic::riscv_vloxseg5_mask:
1945 case Intrinsic::riscv_vloxseg6_mask:
1946 case Intrinsic::riscv_vloxseg7_mask:
1947 case Intrinsic::riscv_vloxseg8_mask:
1951 case Intrinsic::riscv_vluxseg2_mask:
1952 case Intrinsic::riscv_vluxseg3_mask:
1953 case Intrinsic::riscv_vluxseg4_mask:
1954 case Intrinsic::riscv_vluxseg5_mask:
1955 case Intrinsic::riscv_vluxseg6_mask:
1956 case Intrinsic::riscv_vluxseg7_mask:
1957 case Intrinsic::riscv_vluxseg8_mask:
1961 case Intrinsic::riscv_vlseg8ff:
1962 case Intrinsic::riscv_vlseg7ff:
1963 case Intrinsic::riscv_vlseg6ff:
1964 case Intrinsic::riscv_vlseg5ff:
1965 case Intrinsic::riscv_vlseg4ff:
1966 case Intrinsic::riscv_vlseg3ff:
1967 case Intrinsic::riscv_vlseg2ff: {
1971 case Intrinsic::riscv_vlseg8ff_mask:
1972 case Intrinsic::riscv_vlseg7ff_mask:
1973 case Intrinsic::riscv_vlseg6ff_mask:
1974 case Intrinsic::riscv_vlseg5ff_mask:
1975 case Intrinsic::riscv_vlseg4ff_mask:
1976 case Intrinsic::riscv_vlseg3ff_mask:
1977 case Intrinsic::riscv_vlseg2ff_mask: {
1981 case Intrinsic::riscv_vloxei:
1982 case Intrinsic::riscv_vloxei_mask:
1983 case Intrinsic::riscv_vluxei:
1984 case Intrinsic::riscv_vluxei_mask: {
1985 bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1986 IntNo == Intrinsic::riscv_vluxei_mask;
1987 bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1988 IntNo == Intrinsic::riscv_vloxei_mask;
1990 MVT VT = Node->getSimpleValueType(0);
1995 Operands.push_back(Node->getOperand(CurOp++));
2003 "Element count mismatch");
2008 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
2010 "values when XLEN=32");
2013 IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
2014 static_cast<unsigned>(IndexLMUL));
2018 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2024 case Intrinsic::riscv_vlm:
2025 case Intrinsic::riscv_vle:
2026 case Intrinsic::riscv_vle_mask:
2027 case Intrinsic::riscv_vlse:
2028 case Intrinsic::riscv_vlse_mask: {
2029 bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
2030 IntNo == Intrinsic::riscv_vlse_mask;
2032 IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
2034 MVT VT = Node->getSimpleValueType(0);
2043 bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
2046 if (HasPassthruOperand)
2047 Operands.push_back(Node->getOperand(CurOp++));
2060 RISCV::getVLEPseudo(IsMasked, IsStrided,
false, Log2SEW,
2061 static_cast<unsigned>(LMUL));
2065 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2071 case Intrinsic::riscv_vleff:
2072 case Intrinsic::riscv_vleff_mask: {
2073 bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
2075 MVT VT = Node->getSimpleValueType(0);
2080 Operands.push_back(Node->getOperand(CurOp++));
2087 RISCV::getVLEPseudo(IsMasked,
false,
true,
2088 Log2SEW,
static_cast<unsigned>(LMUL));
2091 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2101 unsigned IntNo = Node->getConstantOperandVal(1);
2103 case Intrinsic::riscv_vsseg2:
2104 case Intrinsic::riscv_vsseg3:
2105 case Intrinsic::riscv_vsseg4:
2106 case Intrinsic::riscv_vsseg5:
2107 case Intrinsic::riscv_vsseg6:
2108 case Intrinsic::riscv_vsseg7:
2109 case Intrinsic::riscv_vsseg8: {
2114 case Intrinsic::riscv_vsseg2_mask:
2115 case Intrinsic::riscv_vsseg3_mask:
2116 case Intrinsic::riscv_vsseg4_mask:
2117 case Intrinsic::riscv_vsseg5_mask:
2118 case Intrinsic::riscv_vsseg6_mask:
2119 case Intrinsic::riscv_vsseg7_mask:
2120 case Intrinsic::riscv_vsseg8_mask: {
2125 case Intrinsic::riscv_vssseg2:
2126 case Intrinsic::riscv_vssseg3:
2127 case Intrinsic::riscv_vssseg4:
2128 case Intrinsic::riscv_vssseg5:
2129 case Intrinsic::riscv_vssseg6:
2130 case Intrinsic::riscv_vssseg7:
2131 case Intrinsic::riscv_vssseg8: {
2136 case Intrinsic::riscv_vssseg2_mask:
2137 case Intrinsic::riscv_vssseg3_mask:
2138 case Intrinsic::riscv_vssseg4_mask:
2139 case Intrinsic::riscv_vssseg5_mask:
2140 case Intrinsic::riscv_vssseg6_mask:
2141 case Intrinsic::riscv_vssseg7_mask:
2142 case Intrinsic::riscv_vssseg8_mask: {
2147 case Intrinsic::riscv_vsoxseg2:
2148 case Intrinsic::riscv_vsoxseg3:
2149 case Intrinsic::riscv_vsoxseg4:
2150 case Intrinsic::riscv_vsoxseg5:
2151 case Intrinsic::riscv_vsoxseg6:
2152 case Intrinsic::riscv_vsoxseg7:
2153 case Intrinsic::riscv_vsoxseg8:
2157 case Intrinsic::riscv_vsuxseg2:
2158 case Intrinsic::riscv_vsuxseg3:
2159 case Intrinsic::riscv_vsuxseg4:
2160 case Intrinsic::riscv_vsuxseg5:
2161 case Intrinsic::riscv_vsuxseg6:
2162 case Intrinsic::riscv_vsuxseg7:
2163 case Intrinsic::riscv_vsuxseg8:
2167 case Intrinsic::riscv_vsoxseg2_mask:
2168 case Intrinsic::riscv_vsoxseg3_mask:
2169 case Intrinsic::riscv_vsoxseg4_mask:
2170 case Intrinsic::riscv_vsoxseg5_mask:
2171 case Intrinsic::riscv_vsoxseg6_mask:
2172 case Intrinsic::riscv_vsoxseg7_mask:
2173 case Intrinsic::riscv_vsoxseg8_mask:
2177 case Intrinsic::riscv_vsuxseg2_mask:
2178 case Intrinsic::riscv_vsuxseg3_mask:
2179 case Intrinsic::riscv_vsuxseg4_mask:
2180 case Intrinsic::riscv_vsuxseg5_mask:
2181 case Intrinsic::riscv_vsuxseg6_mask:
2182 case Intrinsic::riscv_vsuxseg7_mask:
2183 case Intrinsic::riscv_vsuxseg8_mask:
2187 case Intrinsic::riscv_vsoxei:
2188 case Intrinsic::riscv_vsoxei_mask:
2189 case Intrinsic::riscv_vsuxei:
2190 case Intrinsic::riscv_vsuxei_mask: {
2191 bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
2192 IntNo == Intrinsic::riscv_vsuxei_mask;
2193 bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
2194 IntNo == Intrinsic::riscv_vsoxei_mask;
2196 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2201 Operands.push_back(Node->getOperand(CurOp++));
2209 "Element count mismatch");
2214 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
2216 "values when XLEN=32");
2219 IsMasked, IsOrdered, IndexLog2EEW,
2220 static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
2224 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2230 case Intrinsic::riscv_vsm:
2231 case Intrinsic::riscv_vse:
2232 case Intrinsic::riscv_vse_mask:
2233 case Intrinsic::riscv_vsse:
2234 case Intrinsic::riscv_vsse_mask: {
2235 bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
2236 IntNo == Intrinsic::riscv_vsse_mask;
2238 IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
2240 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
2245 Operands.push_back(Node->getOperand(CurOp++));
2252 IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
2255 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
2261 case Intrinsic::riscv_sf_vc_x_se:
2262 case Intrinsic::riscv_sf_vc_i_se:
2269 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
2282 SDValue V = Node->getOperand(0);
2283 SDValue SubV = Node->getOperand(1);
2285 auto Idx = Node->getConstantOperandVal(2);
2289 MVT SubVecContainerVT = SubVecVT;
2292 SubVecContainerVT =
TLI.getContainerForFixedLengthVector(SubVecVT);
2294 [[maybe_unused]]
bool ExactlyVecRegSized =
2296 .isKnownMultipleOf(Subtarget->
expandVScale(VecRegSize));
2298 .getKnownMinValue()));
2299 assert(
Idx == 0 && (ExactlyVecRegSized || V.isUndef()));
2301 MVT ContainerVT = VT;
2303 ContainerVT =
TLI.getContainerForFixedLengthVector(VT);
2307 std::tie(SubRegIdx,
Idx) =
2309 ContainerVT, SubVecContainerVT,
Idx,
TRI);
2318 [[maybe_unused]]
bool IsSubVecPartReg =
2322 assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg ||
2324 "Expecting lowering to have created legal INSERT_SUBVECTORs when "
2325 "the subvector is smaller than a full-sized register");
2329 if (SubRegIdx == RISCV::NoSubRegister) {
2330 unsigned InRegClassID =
2334 "Unexpected subvector extraction");
2348 SDValue V = Node->getOperand(0);
2349 auto Idx = Node->getConstantOperandVal(1);
2350 MVT InVT = V.getSimpleValueType();
2354 MVT SubVecContainerVT = VT;
2358 SubVecContainerVT =
TLI.getContainerForFixedLengthVector(VT);
2361 InVT =
TLI.getContainerForFixedLengthVector(InVT);
2365 std::tie(SubRegIdx,
Idx) =
2367 InVT, SubVecContainerVT,
Idx,
TRI);
2377 if (SubRegIdx == RISCV::NoSubRegister) {
2381 "Unexpected subvector extraction");
2400 if (!Node->getOperand(0).isUndef())
2402 SDValue Src = Node->getOperand(1);
2403 auto *Ld = dyn_cast<LoadSDNode>(Src);
2406 if (!Ld || Ld->isIndexed())
2408 EVT MemVT = Ld->getMemoryVT();
2434 if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
2444 Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
2448 false, IsStrided,
false,
2449 Log2SEW,
static_cast<unsigned>(LMUL));
2461 unsigned Locality = Node->getConstantOperandVal(3);
2465 if (
auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
2469 int NontemporalLevel = 0;
2472 NontemporalLevel = 3;
2475 NontemporalLevel = 1;
2478 NontemporalLevel = 0;
2484 if (NontemporalLevel & 0b1)
2486 if (NontemporalLevel & 0b10)
2498 std::vector<SDValue> &OutOps) {
2501 switch (ConstraintID) {
2506 assert(Found &&
"SelectAddrRegImm should always succeed");
2507 OutOps.push_back(Op0);
2508 OutOps.push_back(Op1);
2512 OutOps.push_back(
Op);
2526 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Addr)) {
2539 bool IsPrefetch =
false,
2540 bool IsRV32Zdinx =
false) {
2541 if (!isa<ConstantSDNode>(
Addr))
2544 int64_t CVal = cast<ConstantSDNode>(
Addr)->getSExtValue();
2549 int64_t Lo12 = SignExtend64<12>(CVal);
2551 if (!Subtarget->
is64Bit() || isInt<32>(
Hi)) {
2552 if (IsPrefetch && (Lo12 & 0b11111) != 0)
2554 if (IsRV32Zdinx && !isInt<12>(Lo12 + 4))
2558 int64_t Hi20 = (
Hi >> 12) & 0xfffff;
2575 if (Seq.
back().getOpcode() != RISCV::ADDI)
2577 Lo12 = Seq.
back().getImm();
2578 if (IsPrefetch && (Lo12 & 0b11111) != 0)
2580 if (IsRV32Zdinx && !isInt<12>(Lo12 + 4))
2585 assert(!Seq.
empty() &&
"Expected more instructions in sequence");
2595 for (
auto *
User :
Add->users()) {
2600 EVT VT = cast<MemSDNode>(
User)->getMemoryVT();
2606 cast<StoreSDNode>(
User)->getValue() ==
Add)
2609 cast<AtomicSDNode>(
User)->getVal() ==
Add)
2617 unsigned MaxShiftAmount,
2620 EVT VT =
Addr.getSimpleValueType();
2621 auto UnwrapShl = [
this, VT, MaxShiftAmount](
SDValue N,
SDValue &Index,
2626 if (
N.getOpcode() ==
ISD::SHL && isa<ConstantSDNode>(
N.getOperand(1))) {
2628 if (
N.getConstantOperandVal(1) <= MaxShiftAmount) {
2629 Index =
N.getOperand(0);
2630 ShiftAmt =
N.getConstantOperandVal(1);
2635 return ShiftAmt != 0;
2639 if (
auto *C1 = dyn_cast<ConstantSDNode>(
Addr.getOperand(1))) {
2642 UnwrapShl(AddrB.
getOperand(0), Index, Scale) &&
2644 isInt<12>(C1->getSExtValue())) {
2653 }
else if (UnwrapShl(
Addr.getOperand(0), Index, Scale)) {
2657 UnwrapShl(
Addr.getOperand(1), Index, Scale);
2661 }
else if (UnwrapShl(
Addr, Index, Scale)) {
2676 MVT VT =
Addr.getSimpleValueType();
2688 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
Addr.getOperand(1))) {
2691 GA->getGlobal()->getPointerAlignment(
DL), GA->getOffset());
2692 if (Alignment > 4) {
2698 if (
auto *CP = dyn_cast<ConstantPoolSDNode>(
Addr.getOperand(1))) {
2700 if (Alignment > 4) {
2708 int64_t RV32ZdinxRange = IsRV32Zdinx ? 4 : 0;
2710 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2711 if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
2715 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
2723 GA->getGlobal()->getPointerAlignment(
DL), GA->getOffset());
2724 if ((CVal == 0 || Alignment > CVal) &&
2726 int64_t CombinedOffset = CVal + GA->getOffset();
2730 CombinedOffset, GA->getTargetFlags());
2736 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Base))
2744 if (
Addr.getOpcode() ==
ISD::ADD && isa<ConstantSDNode>(
Addr.getOperand(1))) {
2745 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2746 assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
2747 "simm12 not already handled?");
2752 if (CVal >= -4096 && CVal <= (4094 - RV32ZdinxRange)) {
2753 int64_t Adj = CVal < 0 ? -2048 : 2047;
2770 Offset,
false, RV32ZdinxRange)) {
2780 false, RV32ZdinxRange))
2796 MVT VT =
Addr.getSimpleValueType();
2799 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2800 if (isInt<12>(CVal)) {
2804 if ((CVal & 0b11111) != 0) {
2810 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Base))
2818 if (
Addr.getOpcode() ==
ISD::ADD && isa<ConstantSDNode>(
Addr.getOperand(1))) {
2819 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2820 assert(!(isInt<12>(CVal) && isInt<12>(CVal)) &&
2821 "simm12 not already handled?");
2825 if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
2826 int64_t Adj = CVal < 0 ? -2048 : 2016;
2827 int64_t AdjustedOffset = CVal - Adj;
2830 RISCV::ADDI,
DL, VT,
Addr.getOperand(0),
2861 if (isa<ConstantSDNode>(
Addr.getOperand(1)))
2906 if (Imm != 0 && Imm % ShiftWidth == 0) {
2915 if (Imm != 0 && Imm % ShiftWidth == 0) {
2919 unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
2927 if (Imm % ShiftWidth == ShiftWidth - 1) {
2949 "Unexpected condition code!");
2956 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
2957 if (CCVal != ExpectedCCVal)
2963 if (!
LHS.getValueType().isScalarInteger())
2974 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
2975 int64_t CVal =
C->getSExtValue();
2978 if (CVal == -2048) {
2981 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
2988 if (isInt<12>(CVal) || CVal == 2048) {
2991 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
2999 RISCV::BINVI,
DL,
N->getValueType(0),
LHS,
3015 cast<VTSDNode>(
N.getOperand(1))->getVT().getSizeInBits() == Bits) {
3016 Val =
N.getOperand(0);
3020 auto UnwrapShlSra = [](
SDValue N,
unsigned ShiftAmt) {
3021 if (
N.getOpcode() !=
ISD::SRA || !isa<ConstantSDNode>(
N.getOperand(1)))
3026 N.getConstantOperandVal(1) == ShiftAmt &&
3033 MVT VT =
N.getSimpleValueType();
3044 auto *
C = dyn_cast<ConstantSDNode>(
N.getOperand(1));
3045 if (
C &&
C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
3046 Val =
N.getOperand(0);
3050 MVT VT =
N.getSimpleValueType();
3065 if (
N.getOpcode() ==
ISD::AND && isa<ConstantSDNode>(
N.getOperand(1))) {
3071 uint64_t Mask =
N.getConstantOperandVal(1);
3074 unsigned XLen = Subtarget->
getXLen();
3076 Mask &= maskTrailingZeros<uint64_t>(C2);
3078 Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
3086 if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
3088 EVT VT =
N.getValueType();
3098 if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
3100 EVT VT =
N.getValueType();
3111 uint64_t Mask =
N.getConstantOperandVal(1);
3119 unsigned XLen = Subtarget->
getXLen();
3122 if (C2 > Leading && Leading > 0 && Trailing == ShAmt) {
3124 EVT VT =
N.getValueType();
3130 RISCV::SRLI,
DL, VT, Val,
3137 }
else if (
bool LeftShift =
N.getOpcode() ==
ISD::SHL;
3138 (LeftShift ||
N.getOpcode() ==
ISD::SRL) &&
3139 isa<ConstantSDNode>(
N.getOperand(1))) {
3145 unsigned C1 =
N.getConstantOperandVal(1);
3146 unsigned XLen = Subtarget->
getXLen();
3151 if (LeftShift && Leading == 32 && Trailing > 0 &&
3152 (Trailing + C1) == ShAmt) {
3154 EVT VT =
N.getValueType();
3163 if (!LeftShift && Leading == 32 && Trailing > C1 &&
3164 (Trailing - C1) == ShAmt) {
3166 EVT VT =
N.getValueType();
3185 if (
N.getOpcode() ==
ISD::AND && isa<ConstantSDNode>(
N.getOperand(1)) &&
3190 uint64_t Mask =
N.getConstantOperandVal(1);
3193 Mask &= maskTrailingZeros<uint64_t>(C2);
3201 if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
3203 EVT VT =
N.getValueType();
3218 if (!isa<ConstantSDNode>(
N))
3220 int64_t Imm = cast<ConstantSDNode>(
N)->getSExtValue();
3223 if (isInt<32>(Imm) && ((Imm & 0xfff) != 0xfff || Imm == -1))
3227 for (
const SDNode *U :
N->users()) {
3228 switch (U->getOpcode()) {
3232 if (!(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()))
3236 if (!Subtarget->hasStdExtZvkb())
3239 return V->getOpcode() == ISD::AND ||
3240 V->getOpcode() == RISCVISD::AND_VL;
3251 if (!isInt<32>(Imm)) {
3256 if (OrigImmCost <= NegImmCost)
3279 bool HasChainOp =
User->
getOperand(ChainOpIdx).getValueType() == MVT::Other;
3282 const unsigned Log2SEW =
User->getConstantOperandVal(VLIdx + 1);
3284 if (UserOpNo == VLIdx)
3287 auto NumDemandedBits =
3289 return NumDemandedBits && Bits >= *NumDemandedBits;
3302 const unsigned Depth)
const {
3308 isa<ConstantSDNode>(Node) ||
Depth != 0) &&
3309 "Unexpected opcode");
3316 if (
Depth == 0 && !Node->getValueType(0).isScalarInteger())
3322 if (!
User->isMachineOpcode())
3326 switch (
User->getMachineOpcode()) {
3351 case RISCV::SLLI_UW:
3352 case RISCV::FMV_W_X:
3353 case RISCV::FCVT_H_W:
3354 case RISCV::FCVT_H_W_INX:
3355 case RISCV::FCVT_H_WU:
3356 case RISCV::FCVT_H_WU_INX:
3357 case RISCV::FCVT_S_W:
3358 case RISCV::FCVT_S_W_INX:
3359 case RISCV::FCVT_S_WU:
3360 case RISCV::FCVT_S_WU_INX:
3361 case RISCV::FCVT_D_W:
3362 case RISCV::FCVT_D_W_INX:
3363 case RISCV::FCVT_D_WU:
3364 case RISCV::FCVT_D_WU_INX:
3365 case RISCV::TH_REVW:
3366 case RISCV::TH_SRRIW:
3384 if (Bits >= Subtarget->
getXLen() -
User->getConstantOperandVal(1))
3393 if (Bits >= (
unsigned)llvm::bit_width<uint64_t>(~Imm))
3412 unsigned ShAmt =
User->getConstantOperandVal(1);
3426 case RISCV::FMV_H_X:
3427 case RISCV::ZEXT_H_RV32:
3428 case RISCV::ZEXT_H_RV64:
3434 if (Bits >= (Subtarget->
getXLen() / 2))
3438 case RISCV::SH1ADD_UW:
3439 case RISCV::SH2ADD_UW:
3440 case RISCV::SH3ADD_UW:
3467 if (
auto *
C = dyn_cast<ConstantSDNode>(
N)) {
3468 int64_t
Offset =
C->getSExtValue();
3470 for (Shift = 0; Shift < 4; Shift++)
3471 if (isInt<5>(
Offset >> Shift) && ((
Offset % (1LL << Shift)) == 0))
3478 EVT Ty =
N->getValueType(0);
3490 auto *
C = dyn_cast<ConstantSDNode>(
N);
3491 if (
C && isUInt<5>(
C->getZExtValue())) {
3493 N->getValueType(0));
3494 }
else if (
C &&
C->isAllOnes()) {
3497 N->getValueType(0));
3498 }
else if (isa<RegisterSDNode>(
N) &&
3499 cast<RegisterSDNode>(
N)->
getReg() == RISCV::X0) {
3505 N->getValueType(0));
3515 if (!
N.getOperand(0).isUndef())
3517 N =
N.getOperand(1);
3522 !
Splat.getOperand(0).isUndef())
3524 assert(
Splat.getNumOperands() == 3 &&
"Unexpected number of operands");
3533 SplatVal =
Splat.getOperand(1);
3540 std::function<
bool(int64_t)> ValidateImm,
3541 bool Decrement =
false) {
3543 if (!
Splat || !isa<ConstantSDNode>(
Splat.getOperand(1)))
3546 const unsigned SplatEltSize =
Splat.getScalarValueSizeInBits();
3548 "Unexpected splat operand type");
3557 APInt SplatConst =
Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
3561 if (!ValidateImm(SplatImm))
3574 [](int64_t Imm) {
return isInt<5>(Imm); });
3579 N, SplatVal, *
CurDAG, *Subtarget,
3580 [](int64_t Imm) {
return (isInt<5>(Imm) && Imm != -16) || Imm == 16; },
3587 N, SplatVal, *
CurDAG, *Subtarget,
3589 return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
3597 N, SplatVal, *
CurDAG, *Subtarget,
3598 [Bits](int64_t Imm) {
return isUIntN(Bits, Imm); });
3602 auto IsExtOrTrunc = [](
SDValue N) {
3603 switch (
N->getOpcode()) {
3618 while (IsExtOrTrunc(
N)) {
3619 if (!
N.hasOneUse() ||
N.getScalarValueSizeInBits() < 8)
3621 N =
N->getOperand(0);
3630 N.getOperand(0).getValueType() == Subtarget->
getXLenVT()) {
3631 Imm =
N.getOperand(0);
3637 Imm =
N.getOperand(0);
3653 if (VT == MVT::f64 && !Subtarget->
is64Bit()) {
3665 if (
auto *
C = dyn_cast<ConstantSDNode>(
N)) {
3668 if (!isInt<5>(ImmVal))
3681bool RISCVDAGToDAGISel::doPeepholeSExtW(
SDNode *
N) {
3683 if (
N->getMachineOpcode() != RISCV::ADDIW ||
3705 case RISCV::ADD: Opc = RISCV::ADDW;
break;
3706 case RISCV::ADDI: Opc = RISCV::ADDIW;
break;
3707 case RISCV::SUB: Opc = RISCV::SUBW;
break;
3708 case RISCV::MUL: Opc = RISCV::MULW;
break;
3709 case RISCV::SLLI: Opc = RISCV::SLLIW;
break;
3717 !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
3732 case RISCV::TH_MULAW:
3733 case RISCV::TH_MULAH:
3734 case RISCV::TH_MULSW:
3735 case RISCV::TH_MULSH:
3749 const auto IsVMSet = [](
unsigned Opc) {
3750 return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
3751 Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
3752 Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
3753 Opc == RISCV::PseudoVMSET_M_B8;
3763 if (!V.isMachineOpcode())
3765 if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
3766 for (
unsigned I = 1;
I < V.getNumOperands();
I += 2)
3771 return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
3778 RISCV::getMaskedPseudoInfo(
N->getMachineOpcode());
3782 unsigned MaskOpIdx =
I->MaskOpIdx;
3788 const unsigned Opc =
I->UnmaskedPseudo;
3797 "Masked and unmasked pseudos are inconsistent");
3799 "Unexpected pseudo structure");
3800 assert(!(HasPassthru && !MaskedHasPassthru) &&
3801 "Unmasked pseudo has passthru but masked pseudo doesn't?");
3805 bool ShouldSkip = !HasPassthru && MaskedHasPassthru;
3806 for (
unsigned I = ShouldSkip, E =
N->getNumOperands();
I != E;
I++) {
3817 if (!
N->memoperands_empty())
3820 Result->setFlags(
N->getFlags());
3848bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(
SDNode *
N) {
3851 Passthru =
N->getOperand(0);
3852 False =
N->getOperand(1);
3853 True =
N->getOperand(2);
3854 Mask =
N->getOperand(3);
3855 VL =
N->getOperand(4);
3867 "Expect True is the first output of an instruction.");
3882 RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
3890 if (False != PassthruOpTrue)
3916 unsigned TrueVLIndex =
3928 auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
3929 auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
3932 return CLHS->getZExtValue() <= CRHS->getZExtValue() ?
LHS :
RHS;
3938 VL = GetMinVL(TrueVL, VL);
3959 unsigned MaskedOpc =
Info->MaskedPseudo;
3963 "Expected instructions with mask have policy operand.");
3966 "Expected instructions with mask have a tied dest.");
3976 bool MergeVLShrunk = VL != OrigVL;
3988 const unsigned NormalOpsEnd = TrueVLIndex - HasRoundingMode;
3997 if (HasRoundingMode)
4000 Ops.
append({VL, SEW, PolicyOp});
4010 if (!cast<MachineSDNode>(True)->memoperands_empty())
4023bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
4024 bool MadeChange =
false;
4029 if (
N->use_empty() || !
N->isMachineOpcode())
4033 MadeChange |= performCombineVMergeAndVOps(
N);
4043bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
4044 bool MadeChange =
false;
4049 if (
N->use_empty() || !
N->isMachineOpcode())
4052 const unsigned Opc =
N->getMachineOpcode();
4053 if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
4060 for (
unsigned I = 1, E =
N->getNumOperands();
I != E;
I++) {
4067 Result->setFlags(
N->getFlags());
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
mir Rename Register Operands
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static bool usesAllOnesMask(SDValue MaskOp)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false, bool IsRV32Zdinx=false)
#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)
static bool isWorthFoldingAdd(SDValue Add)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static bool isImplicitDef(SDValue V)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm, bool Decrement=false)
static unsigned getSegInstNF(unsigned Intrinsic)
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
#define INST_ALL_NF_CASE_WITH_FF(NAME)
#define CASE_VMSLT_OPCODES(lmulenum, suffix)
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
static SDValue findVSplat(SDValue N)
#define INST_ALL_NF_CASE(NAME)
static bool IsVMerge(SDNode *N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by other flags.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine, CodeGenOptLevel OptLevel)
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset, bool IsRV32Zdinx=false)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset should be all zeros.
bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
void selectVLSEGFF(SDNode *Node, unsigned NF, bool IsMasked)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
void selectSF_VC_X_SE(SDNode *Node)
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
bool selectInvLogicImm(SDValue N, SDValue &Val)
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
bool selectScalarFPAsInt(SDValue N, SDValue &Imm)
bool hasAllBUsers(SDNode *Node) const
void selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool selectVLOp(SDValue N, SDValue &VL)
bool trySignedBitfieldExtract(SDNode *Node)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
void selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
void selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
Quantity expandVScale(Quantity X) const
If the ElementCount or TypeSize X is scalable and VScale (VLEN) is exactly known, returns X converted...
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVII::VLMUL getLMUL(MVT VT)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
op_iterator op_begin() const
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
unsigned getNumOperands() const
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
bool mayRaiseFPException(SDNode *Node) const
Return whether the node may raise an FP exception.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getRegister(Register Reg, EVT VT)
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
ilist< SDNode >::iterator allnodes_iterator
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ SIGN_EXTEND
Conversion operators.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with integer operands.
static bool hasRoundModeOp(uint64_t TSFlags)
static bool hasVLOp(uint64_t TSFlags)
static bool elementsDependOnMask(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool elementsDependOnVL(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
@ SplitF64
Turns a f64 into a pair of i32s.
@ BuildPairF64
Turns a pair of i32s into an f64.
@ BuildGPRPair
Turn a pair of i<xlen>s into an even-odd register pair (untyped).
@ SPLAT_VECTOR_SPLIT_I64_VL
@ SplitGPRPair
Turn an even-odd register pair (untyped) into a pair of i<xlen>s.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost, bool FreeZeroes)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
static unsigned decodeVSEW(unsigned VSEW)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version).
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version).
CodeGenOptLevel
Code generation optimization level.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
bool hasNoFPExcept() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.