#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"

STATISTIC(NumVRegSpilled,
          "Number of registers within vector register groups spilled");
STATISTIC(NumVRegReloaded,
          "Number of registers within vector register groups reloaded");

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
MCInst RISCVInstrInfo::getNop() const {
  if (STI.hasStdExtZca())
    return MCInstBuilder(RISCV::C_NOP);
  // ...
}

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  // ...
}

static std::optional<unsigned> getLMULForRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  // ...
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
    return 1;
  // ...
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
    return 2;
  // ...
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
    return 4;
  // ...
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
    return 8;
  }
}
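// Note on naming: VL<k>RE<e>_V is the whole-register load "vl<k>re<e>.v". It
// always transfers <k> entire vector registers regardless of the current
// vtype/vl; the element width <e> only describes how the data is viewed.
// E.g. "vl4re32.v v8, (a0)" fills all of v8..v11 even if vl is small.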
// Overload of isLoadFromStackSlot that also reports the access size.
  switch (MI.getOpcode()) {
  // ... (scalar load opcodes)
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    // ...
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...
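// Example: "$x10 = LW %stack.0, 0" is recognized as a reload of %stack.0 into
// x10. A non-zero immediate offset means the access is not a plain spill-slot
// access, so no register is reported.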
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  // ...
}

// Overload of isStoreToStackSlot that also reports the access size.
  switch (MI.getOpcode()) {
  // ... (scalar and vector store opcodes)
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    // ...
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...

bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // ...
  case RISCV::VFMV_V_F:
  // ...
  case RISCV::VFMV_S_F:
  // ...
    return MI.getOperand(1).isUndef();
  // ...
  }
}

static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
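// Example: copying the group v4..v7 to v6..v9 in ascending order would read
// v6/v7 after they had already been overwritten (Dst=6 > Src=4 and 6-4 < 4),
// so this returns true and the copy must be emitted from the highest register
// of each group downwards.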
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVVType::VLMUL LMul) {
  // ...
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  // ...
  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;
    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
      // ...
      unsigned FirstVType = MBBI->getOperand(2).getImm();
      // ...
      if (FirstLMul != LMul)
        return false;
      // ...
      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
        return false;
      // ...
      unsigned VType = MBBI->getOperand(2).getImm();
      // ...
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // ...
      if (MBBI->modifiesRegister(RISCV::VL, /*TRI=*/nullptr))
        return false;
      // ...
      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
          // ...
        }
        // ...
        if (MO.getReg() != SrcReg)
          return false;
        // ...
      }
    }
  }
  return false;
}
void RISCVInstrInfo::copyPhysRegVector(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
    const TargetRegisterClass *RegClass) const {
  // ...
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  // ...
  assert(!Fractional && "It is impossible be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;
  bool ReversedCopy =
      forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
  if (ReversedCopy) {
    // If the src and dest overlap when copied in this order, copy starting
    // from the highest-numbered register of each group instead.
    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;
  }

  unsigned I = 0;
  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    if (ReversedCopy) {
      // For reversed copying we can still use a larger (LMUL 8/4/2) move when
      // enough aligned registers remain; DstEncoding > SrcEncoding is known
      // from forwardCopyWillClobberTuple, so the difference must be at least
      // the LMUL we pick to avoid clobbering.
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
        return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
        return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
        return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
      return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    }

    // For forward copying, use the largest whole-register move for which both
    // encodings are suitably aligned.
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
  };

  while (I != NumRegs) {
    // ...
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    // ...
    if (LMul == LMulCopied &&
        isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
      // ...
      if (DefMBBI->getOpcode() == VIOpc)
        // ...
    }
    // Look up the actual registers for this chunk:
    //   RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding
    //   RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding
    // ...
      MIB = MIB.add(DefMBBI->getOperand(2));
    // ...
      MIB.addImm(Log2SEW ? Log2SEW : 3);
    // ...
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
  }
}
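// Illustrative walk-through: copying v2..v5 to v4..v7 overlaps, so the copy
// is reversed (encodings start at Src=5, Dst=7). GetCopyInfo first picks an
// aligned LMUL=2 chunk, emitting "vmv2r.v v6, v4", then steps down and emits
// "vmv2r.v v4, v2" — two reversed chunks instead of four clobbering M1 moves.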
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, Register DstReg,
                                 Register SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {
  unsigned KillFlag = getKillRegState(KillSrc);

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    // ... (addi DstReg, SrcReg, 0)
    return;
  }

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
    // ... (PseudoMV_FPR16INX)
    return;
  }

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
    // ... (PseudoMV_FPR32INX)
    return;
  }

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    // ...
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
      OddReg = RISCV::X0;
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");
    // ...
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
        .addReg(OddReg, KillFlag)
        .addImm(0);
    return;
  }

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() &&
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin don't have FSGNJ_H, replace FSGNJ_H with FSGNJ_S.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_D), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_W_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_W), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_D_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_D), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  const TargetRegisterClass *RegClass =
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
  // ...
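// Anything that falls through to here should be a vector register (group)
// copy; it is dispatched to copyPhysRegVector() with the minimal common
// register class, so e.g. an LMUL=2 copy becomes a single vmv2r.v.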
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  // ...
  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
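// The PseudoVSPILL<N>_M<L> pseudos spill segment (tuple) register classes:
// e.g. a VRN4M2 value (4 fields at LMUL=2, 8 vector registers in total) is
// spilled as PseudoVSPILL4_M2 and later expanded into one whole-register
// store per field with vlenb-scaled address increments.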
// loadRegFromStackSlot: pick the reload opcode for the register class.
  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
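// Note that plain vector reloads always use the 8-bit-element whole-register
// form (vl1re8.v etc.); a whole-register load restores all VLEN bits, so the
// element width chosen for the reload is irrelevant.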
MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // Only fold a reload into the single register source operand (operand 1).
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  std::optional<unsigned> LoadOpc;
  switch (MI.getOpcode()) {
  default:
    if (RISCVInstrInfo::isSEXT_W(MI))
      // ...
    if (RISCVInstrInfo::isZEXT_W(MI))
      // ...
    if (RISCVInstrInfo::isZEXT_B(MI))
      // ...
    return nullptr;
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    // ...
  case RISCV::VMV_X_S: {
    // ...
    if (ST.getXLen() < (1U << Log2SEW))
      return nullptr;
    // ...
  }
  case RISCV::VFMV_F_S: {
    // ...
  }
  }
  // ...
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0);
}

void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstRenamable,
                            bool DstIsDead) const {
  Register SrcReg = RISCV::X0;
  // ...
  bool SrcRenamable = false;
  unsigned Num = 0;
  for (const RISCVMatInt::Inst &Inst : Seq) {
    bool LastItem = ++Num == Seq.size();
    // ...
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      // ...
      break;
    case RISCVMatInt::RegX0:
      // ...
          .addReg(SrcReg, SrcRegState)
      // ...
      break;
    case RISCVMatInt::RegReg:
      // ...
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
      // ...
      break;
    case RISCVMatInt::RegImm:
      // ...
          .addReg(SrcReg, SrcRegState)
      // ...
      break;
    }
    // ...
    SrcReg = DstReg;
    SrcRenamable = DstRenamable;
  }
}
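// Example: movImm(..., 0x12345678) asks RISCVMatInt for a materialization
// sequence and emits it one instruction at a time, e.g.
//   lui  a0, 0x12345
//   addi a0, a0, 1656        ; +0x678
// where each step reads the DstReg produced by the previous step.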
// Map a conditional-branch opcode (including vendor variants) to its generic
// condition code.
  switch (Opc) {
  default:
    llvm_unreachable("Unknown conditional branch");
  // ...
  case RISCV::CV_BEQIMM:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BEQC:
    return RISCVCC::COND_EQ;
  // ...
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BNEC:
    return RISCVCC::COND_NE;
  // ...
  case RISCV::QC_E_BLTI:
    return RISCVCC::COND_LT;
  // ...
  case RISCV::QC_E_BGEI:
    return RISCVCC::COND_GE;
  // ...
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
    return RISCVCC::COND_LTU;
  // ...
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
    return RISCVCC::COND_GEU;
  }

// Map a condition code back to a branch opcode, taking the originating
// select pseudo into account so vendor immediate-compare branches are used
// where available.
  switch (SelectOpc) {
  default:
    // ... (standard BEQ/BNE/BLT/BGE/BLTU/BGEU)
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
    switch (CC) {
    // ...
    case RISCVCC::COND_EQ:
      return RISCV::CV_BEQIMM;
    case RISCVCC::COND_NE:
      return RISCV::CV_BNEIMM;
    }
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
    switch (CC) {
    // ...
    case RISCVCC::COND_EQ:
      return RISCV::QC_BEQI;
    case RISCVCC::COND_NE:
      return RISCV::QC_BNEI;
    case RISCVCC::COND_LT:
      return RISCV::QC_BLTI;
    case RISCVCC::COND_GE:
      return RISCV::QC_BGEI;
    }
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
    switch (CC) {
    // ...
    case RISCVCC::COND_LTU:
      return RISCV::QC_BLTUI;
    case RISCVCC::COND_GEU:
      return RISCV::QC_BGEUI;
    }
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
    switch (CC) {
    // ...
    case RISCVCC::COND_EQ:
      return RISCV::QC_E_BEQI;
    case RISCVCC::COND_NE:
      return RISCV::QC_E_BNEI;
    case RISCVCC::COND_LT:
      return RISCV::QC_E_BLTI;
    case RISCVCC::COND_GE:
      return RISCV::QC_E_BGEI;
    }
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
    switch (CC) {
    // ...
    case RISCVCC::COND_LTU:
      return RISCV::QC_E_BLTUI;
    case RISCVCC::COND_GEU:
      return RISCV::QC_E_BGEUI;
    }
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
    switch (CC) {
    // ...
    case RISCVCC::COND_EQ:
      return RISCV::NDS_BBC;
    case RISCVCC::COND_NE:
      return RISCV::NDS_BBS;
    }
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
    switch (CC) {
    // ...
    case RISCVCC::COND_EQ:
      return RISCV::NDS_BEQC;
    case RISCVCC::COND_NE:
      return RISCV::NDS_BNEC;
    }
  }
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle Generic branch opcodes from Global ISel.
  if (I->isPreISelOpcode())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(&*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(&*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
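// analyzeBranch recognizes exactly three terminator shapes:
//   1) "j %bb.X"                       -> TBB = X, unconditional
//   2) "blt a0, a1, %bb.X"             -> TBB = X, Cond = {BLT, a0, a1}
//   3) "blt a0, a1, %bb.X; j %bb.Y"    -> TBB = X, FBB = Y
// Everything else (indirect branches, more than two terminators) is reported
// as not analyzable by returning true.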
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // ...
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  // ...
  I->eraseFromParent();
  // ...
  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  // ...
  I->eraseFromParent();
  return 2;
}

unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
  // ...
}

void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  // ...
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
  // ...
  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");
  // ...
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  auto II = MBB.end();
  // ...
  if (TmpGPR != RISCV::NoRegister)
    // ... (the scavenged register can be used directly)
  else {
    // The case when there is no scavenged register needs special handling.
    // Pick s11(or s1 for rve) because it doesn't make a difference.
    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
    // ...
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");
    // ...
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
    MI.getOperand(1).setMBB(&RestoreBB);
    // ...
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  switch (Cond[0].getImm()) {
  default:
    llvm_unreachable("Unexpected branch opcode!");
  case RISCV::BEQ:
    Cond[0].setImm(RISCV::BNE);
    break;
  case RISCV::BNE:
    Cond[0].setImm(RISCV::BEQ);
    break;
  case RISCV::BLT:
    Cond[0].setImm(RISCV::BGE);
    break;
  case RISCV::BGE:
    Cond[0].setImm(RISCV::BLT);
    break;
  case RISCV::BLTU:
    Cond[0].setImm(RISCV::BGEU);
    break;
  case RISCV::BGEU:
    Cond[0].setImm(RISCV::BLTU);
    break;
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
    break;
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
    break;
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
    break;
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
    break;
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
    break;
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
    break;
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
    break;
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
    break;
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
    break;
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
    break;
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
    break;
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
    break;
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
    break;
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
    break;
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
    break;
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
    break;
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
    break;
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
    break;
  }
  return false;
}
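// Each branch is swapped with its logical negation over the same operands,
// e.g. reversing {BLT, a0, a1} yields {BGE, a0, a1}. For the bit-test
// branches NDS_BBC ("branch if bit clear") and NDS_BBS ("branch if bit set")
// the negation is simply the other opcode.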
// Return true if MI is "addi Reg, x0, imm" (a load-immediate) and report the
// immediate value.
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm) {
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();
    return true;
  }
  return false;
}

// Return true if the operand is a constant: either x0 or the destination of
// a load-immediate.
  if (Reg == RISCV::X0) {
    Imm = 0;
    return true;
  }
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
}

bool RISCVInstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  // ...
  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {
  // ...
  }
  // ...
    MI.eraseFromParent();
    return true;
  // ...

  // Try to reuse a register that already holds the constant +/- 1 so the
  // comparison can be flipped without materializing a new constant.
  auto searchConst = [&](int64_t C1) -> Register {
    MachineBasicBlock::reverse_iterator II(&MI), E = MBB->rend();
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1 &&
             I.getOperand(0).getReg().isVirtual();
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();
    return Register();
  };

  // LHS is a constant C0: look for a register holding C0 + 1.
  if (/* LHS is a single-use load-immediate C0 ... */
      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    // ...
    if (Register RegZ = searchConst(C0 + 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      return true;
    }
  }

  // RHS is a constant C0: look for a register holding C0 - 1.
  if (/* RHS is a single-use load-immediate C0 ... */
      MRI.hasOneUse(RHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 - 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      return true;
    }
  }
  return false;
}
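// Illustrative example: with "li t0, 11" already defined above, the pair
//   li t1, 10
//   blt t1, a0, %bb.T        ; taken when 10 < a0, i.e. a0 >= 11
// can drop t1 and reuse t0 as
//   bge a0, t0, %bb.T        ; a0 >= 11
// since x < C is equivalent to the flipped comparison against C +/- 1.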
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  // ...
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  // ...
  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:
    return isIntN(11, BrOffset);
  // ...
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
    return isIntN(13, BrOffset);
  // ...
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    return isIntN(32, SignExtend64(BrOffset, XLen));
  }
}
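// Offset ranges follow the encodings: standard Bcc branches carry a 13-bit
// signed byte offset (+/-4 KiB), JAL/PseudoBR a 21-bit one (+/-1 MiB), and
// PseudoJump (auipc+jalr) covers the signed 32-bit range. Branches to targets
// further away are rewritten by branch relaxation via insertIndirectBranch.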
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;
  }

  return RISCV::INSTRUCTION_LIST_END;
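// The PseudoCC* pseudos implement the short-forward-branch optimization: a
// select such as "x = (a < b) ? (y + z) : y" becomes a single PseudoCCADD
// which is expanded after register allocation into (schematically)
//   bge a, b, 1f        ; inverted condition
//   add x, y, z
// 1:
// so the ALU op executes only on the taken path instead of requiring a
// branch diamond or a full select sequence.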
// Check whether the instruction defining Reg can be folded into a predicated
// (PseudoCC*) form.
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  // ...
  // Don't predicate the load-immediate idiom.
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    return nullptr;
  // ...
  for (const MachineOperand &MO : MI->operands()) {
    // ...
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
    return nullptr;
  return MI;
}

bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // ...
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // ...
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}

MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;
  // ...
  bool Invert = !DefMI;
  // ...
  Register DestReg = MI.getOperand(0).getReg();
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;
  // ...
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
  // ...
  // Copy the condition.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ...
  // Copy the false register.
  NewMI.add(FalseReg);
  // ...
  if (DefMI->getParent() != MI.getParent())
    // ... (update the SeenMIs bookkeeping)
  DefMI->eraseFromParent();
  return NewMI;
}
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    // ...
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *MF.getTarget().getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    // ...
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))
        return 2;
      // ...
    }
    // ...
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }

  switch (Opcode) {
  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    // MV is always compressible to either c.mv or c.li rd, 0.
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its
    // shadow.
    return StackMapOpers(&MI).getNumPatchBytes();
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested.
    return PatchPointOpers(&MI).getNumPatchBytes();
  case TargetOpcode::STATEPOINT: {
    // The size of the statepoint intrinsic is the number of bytes requested.
    unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes();
    // No patch bytes means at most a PseudoCall is emitted.
    return std::max(NumBytes, 8U);
  }
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    const Function &F = MI.getMF()->getFunction();
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      unsigned Num;
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();

      // Number of C.NOP or NOP.
      return (STI.hasStdExtZca() ? 2 : 4) * Num;
    }
    // XRay uses C.JAL + 21 or 33 C.NOPs for each sled in RV32 and RV64,
    // respectively.
    return STI.is64Bit() ? 68 : 44;
  }
  default:
    return get(Opcode).getSize();
  }
}
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  // ...
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // ...
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADD:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::ADDI:
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::SUB:
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::SH1ADD:
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
}
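// These patterns let the rest of the backend treat instructions such as
// "addi rd, rs, 0", "add rd, x0, rs" or "fsgnj.d fd, fs, fs" exactly like
// COPY instructions, enabling copy propagation across them.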
MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
  if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
    // The option is unused. Choose Local strategy only for in-order cores.
    // When the scheduling model is unspecified, use MinInstrCount strategy as
    // it is always profitable.
    const auto &SchedModel = STI.getSchedModel();
    return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
               ? MachineTraceStrategy::TS_MinInstrCount
               : MachineTraceStrategy::TS_Local;
  }
  return ForceMachineCombinerStrategy;
}

void RISCVInstrInfo::finalizeInsInstrs(
    MachineInstr &Root, unsigned &Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  int16_t FrmOpIdx =
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  if (FrmOpIdx < 0) {
    assert(all_of(InsInstrs,
                  [](MachineInstr *MI) {
                    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                                     RISCV::OpName::frm) < 0;
                  }) &&
           "New instructions require FRM whereas the old one does not have it");
    return;
  }
  // ...
  for (auto *NewMI : InsInstrs) {
    // We'd already added the FRM operand.
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
      continue;
    // ...
  }
}

bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK

  unsigned Opcode = Inst.getOpcode();
  if (Invert) {
    // ...
    Opcode = *InvOpcode;
  }
  // ...

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
}
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
                                             const MachineInstr &Prev) const {
  // ...
  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
  // ...
  const uint64_t TSFlags = Desc.TSFlags;

  auto checkImmOperand = [&](unsigned OpIdx) {
    return Root.getOperand(OpIdx).getImm() == Prev.getOperand(OpIdx).getImm();
  };

  auto checkRegOperand = [&](unsigned OpIdx) {
    return Root.getOperand(OpIdx).getReg() == Prev.getOperand(OpIdx).getReg();
  };

  // PassThru operand.
  if (!checkRegOperand(1))
    return false;
  // ...

  // Both instructions must use the same mask (the same V0 definition).
  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {
    // ...
    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();
      // ...
      if (MI1VReg != SrcReg)
        return false;
      // ...
    }
  }
  // ...
  assert(SeenMI2 && "Prev is expected to appear before Root");
  return true;
}

bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
  // ...
  assert(RISCVII::isFirstDefTiedToFirstUse(get(Inst.getOpcode())) &&
         "Expect the present of passthrough operand.");
  // ...
  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);
  if (Commuted)
    std::swap(MI1, MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, /*Invert=*/true)) &&
         // ...
}

bool RISCVInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))
    return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
  // ...
  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  // ...
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
  // ...
}

// getReassociateOperandIndices: RVV pseudos carry a passthrough operand, so
// shift all reassociation indices by one.
  for (unsigned I = 0; I < 5; ++I)
    ++OperandIndices[I];
// ...

bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);
  if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
    return false;

  unsigned OperandIdx = Commuted ? 2 : 1;
  const MachineInstr &Sibling =
      *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         RISCV::hasEqualFRM(Inst, Sibling);
}

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    return true;

  unsigned Opc = Inst.getOpcode();
  if (Invert) {
    auto InverseOpcode = getInverseOpcode(Opc);
    if (!InverseOpcode)
      return false;
    Opc = *InverseOpcode;
  }
  // ...
}

std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK

  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  // ...
  }

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
}
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  // ...
  // Do not combine if the multiply has more than one non-debug use under
  // register-pressure reduction.
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
                                       SmallVectorImpl<unsigned> &Patterns,
                                       bool DoRegPressureReduce) {
  // ...
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    // ...
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    // ...
  }
  // ...
}

static bool getFPPatterns(MachineInstr &Root,
                          SmallVectorImpl<unsigned> &Patterns,
                          bool DoRegPressureReduce) {
  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

// Check whether MO is defined in this block by an instruction with opcode
// CombineOpc and a single non-debug use.
static bool canCombine(const MachineBasicBlock &MBB, const MachineOperand &MO,
                       unsigned CombineOpc) {
  // ...
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
    return false;
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  return true;
}

// Check whether the inner SLLI can be absorbed into an outer SHXADD: the
// inner shift must be at least the outer one and differ from it by at most 3.
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
                                      const MachineOperand &MO,
                                      unsigned OuterShiftAmt) {
  // ...
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
    return false;
  return true;
}

// Returns the shift amount of a SHXADD[.UW] instruction, or 0 otherwise.
  switch (Opc) {
  default:
    return 0;
  case RISCV::SH1ADD:
  case RISCV::SH1ADD_UW:
    return 1;
  case RISCV::SH2ADD:
  case RISCV::SH2ADD_UW:
    return 2;
  case RISCV::SH3ADD:
  case RISCV::SH3ADD_UW:
    return 3;
  }

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  // ...
  if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
    return true;
  // ...
}

static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern) {
  switch (RootOpc) {
  // ...
  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  // ...
  }
}

static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
                                   unsigned Pattern,
                                   SmallVectorImpl<MachineInstr *> &InsInstrs,
                                   SmallVectorImpl<MachineInstr *> &DelInstrs) {
  // ...
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
  // ...
  MachineInstrBuilder MIB =
      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
          // ...
  // ...
}

// Combine patterns like (sh3add Z, (add X, (slli Y, 5))) into
// (sh3add (sh2add Y, Z), X), splitting the shift amount across two SHXADDs.
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              DenseMap<Register, unsigned> &InstrIdxForVirtReg) {
  unsigned OuterShiftAmt = getSHXADDShiftAmount(Root.getOpcode());
  assert(OuterShiftAmt != 0 && "Unexpected opcode");
  // ...
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  unsigned InnerOpc;
  switch (InnerShiftAmt - OuterShiftAmt) {
  default:
    llvm_unreachable("Unexpected shift amount");
  case 0:
    InnerOpc = RISCV::ADD;
    break;
  case 1:
    InnerOpc = RISCV::SH1ADD;
    break;
  case 2:
    InnerOpc = RISCV::SH2ADD;
    break;
  case 3:
    InnerOpc = RISCV::SH3ADD;
    break;
  }
  // ...
  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
  // ...
}

void RISCVInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  // ...
  TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                              DelInstrs, InstrIdxForVirtReg);
  // ...
}
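// Worked example for genShXAddAddShift: to compute z*8 + x + y*32 the input
// is "sh3add Z, (add X, (slli Y, 5))". Since 5 - 3 = 2, the rewrite emits
//   tmp = sh2add Y, Z        ; y*4 + z
//   res = sh3add tmp, X      ; (y*4 + z)*8 + x = y*32 + z*8 + x
// which removes the separate slli and shortens the dependence chain.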
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrDesc &Desc = MI.getDesc();

  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(Index);
      if (MO.isReg()) {
        ErrInfo = "Expected a non-register operand.";
        return false;
      }
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");

          // clang-format off
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
#define CASE_OPERAND_SIMM(NUM)                                                 \
  case RISCVOp::OPERAND_SIMM##NUM:                                             \
    Ok = isInt<NUM>(Imm);                                                      \
    break;
        // ... (the individual immediate classes)
          Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);
          break;
        // ...
          Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
          break;
        // ...
          Ok = Ok && Imm != 0;
          break;
        // ...
          Ok = (Imm >= 0xfffe0 && Imm <= 0xfffff);
          break;
        // ...
          Ok = Imm >= 0 && Imm <= 10;
          break;
        // ...
          Ok = Imm >= 0 && Imm <= 7;
          break;
        // ...
          Ok = Imm >= 1 && Imm <= 10;
          break;
        // ...
          Ok = Imm >= 2 && Imm <= 14;
          break;
        // ...
          Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
          break;
        // ...
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  const uint64_t TSFlags = Desc.TSFlags;
  if (RISCVII::hasVLOp(TSFlags)) {
    const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
      const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }
    if (!RISCVII::hasSEWOp(TSFlags)) {
      ErrInfo = "VL operand w/o SEW operand?";
      return false;
    }
  }
  if (RISCVII::hasSEWOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      return false;
    }
    uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
    if (Log2SEW > 31) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    if (!RISCVVType::isValidSEW(SEW)) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
  }
  if (RISCVII::hasVecPolicyOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      return false;
    }
    uint64_t Policy = MI.getOperand(OpIdx).getImm();
    if (Policy > (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) {
      ErrInfo = "Invalid Policy Value";
      return false;
    }
    if (!RISCVII::hasVLOp(TSFlags)) {
      ErrInfo = "policy operand w/o VL operand?";
      return false;
    }

    // The policy operand is present only for instructions with a tied
    // passthrough.
    unsigned UseOpIdx;
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }
  }

  if (/* the instruction has a dynamic rounding-mode operand ... */
      !MI.readsRegister(RISCV::FRM, /*TRI=*/nullptr)) {
    ErrInfo = "dynamic rounding mode should read FRM";
    return false;
  }

  return true;
}
// canFoldIntoAddrMode:
  switch (MemI.getOpcode()) {
  // ...
  case RISCV::LD_RV32:
  // ...
  case RISCV::SD_RV32:
  // ...
  }
  // ...
  int64_t NewOffset = OldOffset + Disp;
  // ...
  llvm_unreachable("Addressing mode not supported for folding");
// ...

// getMemOperandsWithOffsetWidth:
  switch (LdSt.getOpcode()) {
  // ...
  case RISCV::LD_RV32:
  // ...
  case RISCV::SD_RV32:
  // ...
  }
  OffsetIsScalable = false;
  // ...

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  // ...
  return Base1 == Base2;
}

bool RISCVInstrInfo::shouldClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
    bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
    int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, we won't
  // cluster them.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    // ...
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  unsigned CacheLineSize =
      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  // ...
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
}
bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  // ...
  int64_t OffsetA = 0, OffsetB = 0;
  // ...
  // Two accesses off the same base are disjoint when the lower one ends
  // before the higher one begins.
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  if (LowWidth.hasValue() &&
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
    return true;
  // ...
  return false;
}
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  return ArrayRef(TargetFlags);
}
bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // ...
  return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
}

static bool isCandidatePatchable(const MachineBasicBlock &MBB) {
  const Function &F = MBB.getParent()->getFunction();
  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");
}

static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI,
                         MCRegister RegNo) {
  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
}

static bool isMIModifiesReg(const MachineInstr &MI,
                            const TargetRegisterInfo *TRI, MCRegister RegNo) {
  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
}

static bool cannotInsertTailCall(const MachineBasicBlock &MBB) {
  if (!MBB.back().isReturn())
    return true;
  // ...
}

static bool analyzeCandidate(outliner::Candidate &C) {
  // If the last instruction is a return, we can rely on the verification
  // already performed in getOutliningTypeImpl.
  if (C.back().isReturn()) {
    assert(!cannotInsertTailCall(*C.getMBB()) &&
           "The candidate who uses return instruction must be outlined "
           "using tail call");
    return false;
  }
  // ...
  // If the candidate reads the register used for expanding PseudoTAIL, a
  // tail call cannot be inserted.
  // ...
  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
}
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
RISCVInstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  // ...
  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;

  // Each RepeatedSequenceLoc is identical.
  outliner::Candidate &Candidate = RepeatedSequenceLocs[0];
  unsigned InstrSizeCExt =
      Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtZca() ? 2 : 4;
  unsigned CallOverhead = 0, FrameOverhead = 0;
  // ...
    // A tail call is auipc + jalr in the worst case without linker relaxation.
    CallOverhead = 4 + InstrSizeCExt;
    // ...
    // jr t0 = 4 bytes, or 2 bytes with compressed instructions.
    FrameOverhead = InstrSizeCExt;
  // ...

  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)
    SequenceSize += getInstSizeInBytes(MI);

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
}

outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                     MachineBasicBlock::iterator &MBBI,
                                     unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  // We can manually strip out CFI instructions later.
  if (MI.isCFIInstruction())
    // ... (illegal if the function needs an unwind table entry)

  // ...
  // Make sure the operands don't reference anything unsafe.
  for (const auto &MO : MI.operands()) {
    // ...
    // pcrel-hi and pcrel-lo can't be put in separate sections.
    if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}

void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // Strip out any CFI instructions.
  auto I = MBB.begin();
  auto E = MBB.end();
  for (; I != E; ++I) {
    if (I->isCFIInstruction()) {
      I->removeFromParent();
    }
  }
  // ...
  MBB.addLiveIn(RISCV::X5);
  // ... (append "jr t0" as the outlined function's return)
}

MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(RISCV::PseudoTAIL))
                            .addGlobalAddress(M.getNamedValue(MF.getName()),
                                              /*Offset=*/0, RISCVII::MO_CALL));
    return It;
  }

  // Add in a call instruction to the outlined function at the given location.
  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg))
                          .addReg(RISCV::X5)
                          .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                            RISCVII::MO_CALL));
  return It;
}
3580 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3581 MI.getOperand(2).isImm())
3582 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3584 return std::nullopt;
3592 std::string GenericComment =
3594 if (!GenericComment.empty())
3595 return GenericComment;
3599 return std::string();
3603 return std::string();
3605 std::string Comment;
3612 switch (OpInfo.OperandType) {
3615 unsigned Imm =
Op.getImm();
3621 unsigned Log2SEW =
Op.getImm();
3622 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3628 unsigned Policy =
Op.getImm();
3630 "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

// VFMA instructions are SEW-specific, so the cases also carry the SEW.
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // We can't commute operands if operand 2 (i.e., rs1 in
    // mveqz/mvnez rd,rs1,rs2) is the zero-register (as it is
    // not valid as the in/out-operand 1).
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    // Operands 1 and 2 are commutable, if we switch the opcode.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    // Operands 2 and 3 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    // Operands 4 and 5 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  // ... (commutative RVV arithmetic: operands 2 and 3)
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  // ... (VFMA splat forms)
  {
    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  }
  // ... (VFMA vector-vector forms)
  {
    // For these instructions we have more freedom. We can commute with the
    // other multiplicand or with the addend/subtrahend/minuend.

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both ops are fixed, one must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    // Look for two different register operands assumed to be commutable
    // regardless of the FMA opcode. The FMA opcode is adjusted later if
    // needed.
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both operands are unfixed: pick the tied source first.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is unfixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now. Choose another commutable
      // operand for CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        // The commuted operands should have different registers; otherwise
        // the commute changes nothing.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }
    return true;
  }
  }
  // ...
}
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)                \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

// VFMA opcode-change cases are SEW-specific.
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW)          \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW:                        \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW;                     \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)              \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)                               \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
MachineInstr &RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    // ...
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    // ...
  }
  // ... (VFMA splat forms)
  {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    // ... (CASE_VFMA_CHANGE_OPCODE_SPLATS pairs)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  // ... (VFMA vector-vector forms)
  {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend, we need to change the opcode;
    // otherwise we're just swapping two multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
      // ... (CASE_VFMA_CHANGE_OPCODE_VV pairs)
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
                                                     /*NewMI=*/false, OpIdx1,
                                                     OpIdx2);
    }
    break;
  }
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
bool RISCVInstrInfo::simplifyInstruction(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADD:
  case RISCV::OR:
  case RISCV::XOR:
    // Normalize: [x]or/add rd, zero, rs => [x]or/add rd, rs, zero
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    // [x]or/add rd, rs, zero => addi rd, rs, 0
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // xor rd, rs, rs => addi rd, zero, 0
    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  case RISCV::ORI:
  case RISCV::XORI:
    // [x]ori rd, zero, N => addi rd, zero, N
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  case RISCV::SUB:
    // sub rd, rs, zero => addi rd, rs, 0
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  case RISCV::SUBW:
    // subw rd, rs, zero => addiw rd, rs, 0
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
      return true;
    }
    break;
  case RISCV::ADDW:
    // Normalize: addw rd, zero, rs => addw rd, rs, zero
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    // addw rd, rs, zero => addiw rd, rs, 0
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
      return true;
    }
    break;
  case RISCV::SH1ADD:
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD:
  case RISCV::SH3ADD_UW:
    // shNadd[.uw] rd, zero, rs => addi rd, rs, 0
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.addOperand(MachineOperand::CreateImm(0));
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // shNadd rd, rs, zero => slli rd, rs, N
    // shNadd.uw rd, rs, zero => slli.uw rd, rs, N
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {
        // ...
        MI.setDesc(get(RISCV::SLLI_UW));
        return true;
      }
      // ...
      MI.setDesc(get(RISCV::SLLI));
      return true;
    }
    break;
  // ... (AND/MUL-family: either operand zero makes the result zero)
    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  case RISCV::ANDI:
    // andi rd, zero, C => addi rd, zero, 0
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (register shifts: zero source or zero shift amount)
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (word-sized register shifts with zero source)
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (immediate shifts, including SLLI_UW, with zero source)
  case RISCV::SLLI_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (SLTU / ADD_UW)
    // sltu/add.uw rd, zero, zero => addi rd, zero, 0
    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // add.uw rd, zero, rs => addi rd, rs, 0
    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.addOperand(MachineOperand::CreateImm(0));
      MI.setDesc(get(RISCV::ADDI));
    }
    break;
  case RISCV::SLTIU:
    // sltiu rd, zero, C => addi rd, zero, (C != 0)
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (sign/zero extensions of zero)
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.addOperand(MachineOperand::CreateImm(0));
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (min/max of a register with itself)
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ... (BEQ/BNE: move x0 to the second compare operand)
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
    }
    break;
  // ... (BLTU zero, rs => BNE rs, zero)
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));
    }
    break;
  // ... (BGEU zero, rs => BEQ rs, zero)
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
    }
    break;
  }
  return false;
}
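// simplifyInstruction canonicalizes trivial x0/identity forms so later passes
// only need to pattern-match ADDI, e.g.:
//   add    a0, x0, a1  ->  addi a0, a1, 0    (a plain copy)
//   xor    a0, a1, a1  ->  addi a0, x0, 0    (constant zero)
//   sh2add a0, a1, x0  ->  slli a0, a1, 2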
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

// FP wide ops are SEW-specific, so the cases also carry the SEW.
#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW)                            \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)                                        \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW)                     \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED:                             \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW;                              \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                 \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                    LiveVariables *LV,
                                                    LiveIntervals *LIS) const {
  MachineInstrBuilder MIB;
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  case CASE_FP_WIDEOP_OPCODE_LMULS(FWADD_WV):
  case CASE_FP_WIDEOP_OPCODE_LMULS(FWSUB_WV): {
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    // ...
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
    CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
    }
    // ...
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));
    break;
  }
  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
    // If the tail policy is undisturbed we can't convert.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    // ...
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
    }
    // ...
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));
    break;
  }
  }
  // ...
  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
    }
  }

  if (LIS) {
    // ...
    if (MI.getOperand(0).isEarlyClobber()) {
      // ...
    }
  }

  return MIB;
}

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
// mulImm: multiply DestReg in place by the constant Amount.
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    // ... (a single SLLI)
  } else if (STI.hasStdExtZba() &&
             ((Amount % 3 == 0 && isPowerOf2_64(Amount / 3)) ||
              (Amount % 5 == 0 && isPowerOf2_64(Amount / 5)) ||
              (Amount % 9 == 0 && isPowerOf2_64(Amount / 9)))) {
    // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
    unsigned Opc;
    uint32_t ShiftAmount;
    if (Amount % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(Amount / 9);
    } else if (Amount % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(Amount / 5);
    } else if (Amount % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(Amount / 3);
    }
    // ... (emit the SLLI, if any, followed by the SHXADD)
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (SLLI into the scratch register, then ADD)
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (SLLI into the scratch register, then SUB)
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (materialize Amount into N, then MUL)
  } else {
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
              .addReg(DestReg, RegState::Kill)
              .addImm(ShiftAmount - PrevShiftAmount)
              .setMIFlag(Flag);
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            // ... (COPY DestReg into Acc)
          } else {
            // ... (ADD DestReg into Acc)
          }
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    // ... (final ADD of Acc into DestReg)
  }
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}

unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
  return OptLevel >= CodeGenOptLevel::Aggressive
             ? STI.getTailDupAggressiveThreshold()
             : 2;
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !RISCV::isRVVSpillForZvlsseg(Opcode) /* ... */)
    return false;
  return true;
}

std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  // ...
}
4664std::optional<unsigned>
4668 return std::nullopt;
4671 case RISCV::VSLL_VX:
4672 case RISCV::VSRL_VX:
4673 case RISCV::VSRA_VX:
4675 case RISCV::VSSRL_VX:
4676 case RISCV::VSSRA_VX:
4678 case RISCV::VROL_VX:
4679 case RISCV::VROR_VX:
4684 case RISCV::VNSRL_WX:
4685 case RISCV::VNSRA_WX:
4687 case RISCV::VNCLIPU_WX:
4688 case RISCV::VNCLIP_WX:
4690 case RISCV::VWSLL_VX:
4695 case RISCV::VADD_VX:
4696 case RISCV::VSUB_VX:
4697 case RISCV::VRSUB_VX:
4699 case RISCV::VWADDU_VX:
4700 case RISCV::VWSUBU_VX:
4701 case RISCV::VWADD_VX:
4702 case RISCV::VWSUB_VX:
4703 case RISCV::VWADDU_WX:
4704 case RISCV::VWSUBU_WX:
4705 case RISCV::VWADD_WX:
4706 case RISCV::VWSUB_WX:
4708 case RISCV::VADC_VXM:
4709 case RISCV::VADC_VIM:
4710 case RISCV::VMADC_VXM:
4711 case RISCV::VMADC_VIM:
4712 case RISCV::VMADC_VX:
4713 case RISCV::VSBC_VXM:
4714 case RISCV::VMSBC_VXM:
4715 case RISCV::VMSBC_VX:
4717 case RISCV::VAND_VX:
4719 case RISCV::VXOR_VX:
4721 case RISCV::VMSEQ_VX:
4722 case RISCV::VMSNE_VX:
4723 case RISCV::VMSLTU_VX:
4724 case RISCV::VMSLT_VX:
4725 case RISCV::VMSLEU_VX:
4726 case RISCV::VMSLE_VX:
4727 case RISCV::VMSGTU_VX:
4728 case RISCV::VMSGT_VX:
4730 case RISCV::VMINU_VX:
4731 case RISCV::VMIN_VX:
4732 case RISCV::VMAXU_VX:
4733 case RISCV::VMAX_VX:
4735 case RISCV::VMUL_VX:
4736 case RISCV::VMULH_VX:
4737 case RISCV::VMULHU_VX:
4738 case RISCV::VMULHSU_VX:
4740 case RISCV::VDIVU_VX:
4741 case RISCV::VDIV_VX:
4742 case RISCV::VREMU_VX:
4743 case RISCV::VREM_VX:
4745 case RISCV::VWMUL_VX:
4746 case RISCV::VWMULU_VX:
4747 case RISCV::VWMULSU_VX:
4749 case RISCV::VMACC_VX:
4750 case RISCV::VNMSAC_VX:
4751 case RISCV::VMADD_VX:
4752 case RISCV::VNMSUB_VX:
4754 case RISCV::VWMACCU_VX:
4755 case RISCV::VWMACC_VX:
4756 case RISCV::VWMACCSU_VX:
4757 case RISCV::VWMACCUS_VX:
4759 case RISCV::VMERGE_VXM:
4761 case RISCV::VMV_V_X:
4763 case RISCV::VSADDU_VX:
4764 case RISCV::VSADD_VX:
4765 case RISCV::VSSUBU_VX:
4766 case RISCV::VSSUB_VX:
4768 case RISCV::VAADDU_VX:
4769 case RISCV::VAADD_VX:
4770 case RISCV::VASUBU_VX:
4771 case RISCV::VASUB_VX:
4773 case RISCV::VSMUL_VX:
4775 case RISCV::VMV_S_X:
4777 case RISCV::VANDN_VX:
4778 return 1U << Log2SEW;
4784 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
4787 return RVV->BaseInstr;
4797 unsigned Scaled = Log2SEW + (DestEEW - 1);
4811 return std::nullopt;

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  // The same virtual register trivially compares equal to itself.
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // Everything is <= VLMAX, and 0 is <= everything.
  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  // VLMAX on the left is only <= another VLMAX, which is handled above.
  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
    return false;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
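
// Hypothetical sketch (not part of the original file): with two plain
// immediate VL operands the query reduces to an integer comparison.
static bool isVLKnownLEExample() {
  MachineOperand VL4 = MachineOperand::CreateImm(4);
  MachineOperand VL16 = MachineOperand::CreateImm(16);
  return RISCV::isVLKnownLE(VL4, VL16); // true: 4 <= 16
}
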

namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *LHS;
  MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(MachineInstr *LHS, MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Make the instructions for loop control be placed in stage 0.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is normalized for such use by the caller.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
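
// Hypothetical sketch (not in the original file): once analyzeLoopForPipelining
// succeeds, the MachinePipeliner drives these hooks roughly as follows; a
// std::nullopt result means "emit the returned Cond as a runtime guard".
static void pipelinerHookSketch(TargetInstrInfo::PipelinerLoopInfo &PLI,
                                MachineBasicBlock &Prolog) {
  SmallVector<MachineOperand, 4> GuardCond;
  std::optional<bool> Known =
      PLI.createTripCountGreaterCondition(/*TC=*/1, Prolog, GuardCond);
  (void)Known; // a known-true trip count lets the pipeliner skip the guard
}
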

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // The loop must end in a conditional branch back to itself.
  if (TBB != LoopBB && FBB != LoopBB)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Resolve the compared operands to their defining instructions when they
  // are virtual registers.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };
  MachineInstr *LHS = FindRegDef(Cond[1]);
  MachineInstr *RHS = FindRegDef(Cond[2]);

  // Loop-carried PHIs as compare operands are not handled.
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}

bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  // Vector pseudos are keyed by their MC opcode in the cases below.
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Integer div/rem.
  case RISCV::DIV:
  case RISCV::DIVW:
  case RISCV::DIVU:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMW:
  case RISCV::REMU:
  case RISCV::REMUW:
  // Floating-point div/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer div/rem.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector floating-point div/sqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
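
// Hypothetical illustration (not in the original file): div/rem and sqrt in
// any domain are the high-latency defs; ordinary arithmetic is not.
static bool highLatencySketch(const RISCVInstrInfo &TII) {
  return TII.isHighLatencyDef(RISCV::VFSQRT_V) && // vector fp sqrt: true
         !TII.isHighLatencyDef(RISCV::ADD);       // scalar add: false
}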