#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
  if (STI.hasStdExtZca())
                                             int &FrameIndex) const {
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
  switch (MI.getOpcode()) {
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
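// Illustrative note, not upstream code: the two hooks above pattern-match
// frame-index memory accesses. A hedged sketch of the MIR shapes they accept
// (operand 1 a frame index, immediate offset 0):
//
//   %v:gpr = LW %stack.0, 0    ; isLoadFromStackSlot  -> FrameIndex = 0
//   SW %v:gpr, %stack.0, 0     ; isStoreToStackSlot   -> FrameIndex = 0
//
// Whole vector register loads (VL1RE8_V etc.) carry no offset operand, so
// only the frame-index check applies to them.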
  case RISCV::VFMV_V_F:
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();

  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;

    if (MBBI->isMetaInstruction())

    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
      unsigned FirstVType = MBBI->getOperand(2).getImm();
      if (FirstLMul != LMul)
      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
      unsigned VType = MBBI->getOperand(2).getImm();
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
    } else if (MBBI->getNumDefs()) {
      if (MBBI->modifiesRegister(RISCV::VL, /*TRI=*/nullptr))
        if (!MO.isReg() || !MO.isDef())
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
        if (MO.getReg() != SrcReg)
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  assert(!Fractional && "It is impossible to have a fractional LMUL here.");
  unsigned NumRegs = NF * LMulVal;

    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;

  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    uint16_t Diff = DstEncoding - SrcEncoding;
    if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
        DstEncoding % 8 == 7)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
        DstEncoding % 4 == 3)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
        DstEncoding % 2 == 1)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
  };
  while (I != NumRegs) {
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);

    if (LMul == LMulCopied &&
      if (DefMBBI->getOpcode() == VIOpc)

        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
      MIB = MIB.add(DefMBBI->getOperand(2));
      MIB.addImm(Log2SEW ? Log2SEW : 3);

    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
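// Illustrative sketch of the chunk selection performed by GetCopyInfo above,
// for the forward-copy direction: take the largest group of 8/4/2/1 registers
// that still fits and whose encodings are aligned to the group size. The
// helper name is hypothetical, not upstream API.
static unsigned largestCopyChunk(unsigned I, unsigned NumRegs,
                                 uint16_t SrcEncoding, uint16_t DstEncoding) {
  for (unsigned Size : {8u, 4u, 2u})
    if (I + Size <= NumRegs && SrcEncoding % Size == 0 &&
        DstEncoding % Size == 0)
      return Size; // copy with VMV<Size>R_V / PseudoVMV_*_M<Size>
  return 1;        // fall back to a single-register VMV1R_V copy
}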
                                 bool RenamableDest, bool RenamableSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
      (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
          "Unexpected extensions");
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);

      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
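// Illustrative note, not upstream code: a sign-injection with both source
// operands equal is exactly a register move, which is how "fmv" is
// canonically encoded on RISC-V. That is why the FPR copies above use FSGNJ:
//
//   fsgnj.s fa0, fa1, fa1   # fa0 = fa1 (i.e. fmv.s fa0, fa1)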
                                         Register SrcReg, bool IsKill, int FI,
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
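// Illustrative sketch: for whole vector register groups the spill opcode
// simply scales with LMUL, as the chain above encodes. Hypothetical helper,
// not upstream API.
static unsigned vectorSpillOpcodeForLMUL(unsigned LMul) {
  switch (LMul) {
  case 1: return RISCV::VS1R_V;
  case 2: return RISCV::VS2R_V;
  case 4: return RISCV::VS4R_V;
  case 8: return RISCV::VS8R_V;
  default: llvm_unreachable("unexpected LMUL for a whole-register spill");
  }
}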
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
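// Illustrative note: reloads mirror the spill table above, but always use
// the EEW=8 whole-register loads (VL<k>RE8_V). That is safe because a
// whole-register load/store moves raw bytes, independent of the SEW the
// values were originally produced with.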
  if (Ops.size() != 1 || Ops[0] != 1)

  switch (MI.getOpcode()) {
    if (RISCVInstrInfo::isSEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_B(MI))
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  case RISCV::VMV_X_S: {
    if (ST.getXLen() < (1U << Log2SEW))
  case RISCV::VFMV_F_S: {

  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
                               bool DstIsDead) const {
  bool SrcRenamable = false;

    bool LastItem = ++Num == Seq.size();
    switch (Inst.getOpndKind()) {
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
    SrcRenamable = DstRenamable;
  case RISCV::CV_BEQIMM:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BEQC:
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BNEC:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
         "Unknown conditional branch");
  switch (SelectOpc) {
  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
      return RISCV::CV_BEQIMM;
      return RISCV::CV_BNEIMM;
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
      return RISCV::QC_BEQI;
      return RISCV::QC_BNEI;
      return RISCV::QC_BLTI;
      return RISCV::QC_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
      return RISCV::QC_BLTUI;
      return RISCV::QC_BGEUI;
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
      return RISCV::QC_E_BEQI;
      return RISCV::QC_E_BNEI;
      return RISCV::QC_E_BLTI;
      return RISCV::QC_E_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
      return RISCV::QC_E_BLTUI;
      return RISCV::QC_E_BGEUI;
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
      return RISCV::NDS_BBC;
      return RISCV::NDS_BBS;
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
      return RISCV::NDS_BEQC;
      return RISCV::NDS_BNEC;
                                   bool AllowModify) const {
  TBB = FBB = nullptr;

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {

  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
    I = FirstUncondOrIndirectBr;

  if (I->getDesc().isIndirectBranch())

  if (I->isPreISelOpcode())

  if (NumTerminators > 2)

  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
                                      int *BytesRemoved) const {
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  if (I == MBB.begin())
  if (!I->getDesc().isConditionalBranch())

  I->eraseFromParent();
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "RISC-V branch conditions have two components!");

  assert(RS && "RegScavenger required for long branching");
         "new block should be inserted for expanding unconditional branch");
         "restore block should be inserted for restoring clobbered registers");
         "Branch offsets outside of the signed 32-bit range not supported");
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  auto II = MBB.end();

  if (TmpGPR != RISCV::NoRegister)
    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
  if (FrameIndex == -1)

  TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
  MI.getOperand(1).setMBB(&RestoreBB);
  TRI->eliminateFrameIndex(RestoreBB.back(),
  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");
    Cond[0].setImm(RISCV::BNE);
    Cond[0].setImm(RISCV::BNEI);
    Cond[0].setImm(RISCV::BEQ);
    Cond[0].setImm(RISCV::BEQI);
    Cond[0].setImm(RISCV::BGE);
    Cond[0].setImm(RISCV::BLT);
    Cond[0].setImm(RISCV::BGEU);
    Cond[0].setImm(RISCV::BLTU);
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
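// Illustrative example of the inversion table above: only the opcode stored
// in Cond[0] changes; the two compared operands keep their order, e.g.
//
//   blt a0, a1, L   <== inverts to ==>   bge a0, a1, L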
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();

  if (Reg == RISCV::X0) {
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {
  MI.eraseFromParent();

  auto searchConst = [&](int64_t C1) -> Register {
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
             I.getOperand(0).getReg().isVirtual();
      return DefC1->getOperand(0).getReg();

      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    if (Register RegZ = searchConst(C0 + 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

      MRI.hasOneUse(RHS.getReg())) {
    if (Register RegZ = searchConst(C0 - 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();

                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
  case RISCV::PseudoBR:
  case RISCV::PseudoJump:
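// Hedged sketch of the ranges this hook checks, from the RISC-V encodings:
// conditional branches carry a 13-bit signed offset (+/-4 KiB) and plain
// jumps a 21-bit signed offset (+/-1 MiB), i.e. roughly:
//
//   case RISCV::BEQ: /* ... BGEU */ return isIntN(13, BrOffset);
//   case RISCV::PseudoBR:           return isIntN(21, BrOffset);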
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;

  return RISCV::INSTRUCTION_LIST_END;
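// Hedged sketch of what a PseudoCC* op stands for: the branch condition, the
// false value, and the ALU operands travel in one pseudo, which is later
// expanded into a short forward branch. Assumed expansion shape, for
// illustration only:
//
//   rd = PseudoCCADD lhs, rhs, cc, falsev, a, b
//     =>      mv        rd, falsev
//             b<inv-cc> lhs, rhs, .Lmerge
//             add       rd, a, b
//     .Lmerge: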
  if (!Reg.isVirtual())
  if (!MRI.hasOneNonDBGUse(Reg))

  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)

    if (MO.isFI() || MO.isCPI() || MO.isJTI())
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
                                unsigned &TrueOp, unsigned &FalseOp,
                                bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");

  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));

  Optimizable = STI.hasShortForwardBranchOpt();
                                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())

  bool Invert = !DefMI;

  Register DestReg = MI.getOperand(0).getReg();
  if (!MRI.constrainRegClass(DestReg, PreviousClass))

  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));

  NewMI.add(FalseReg);

  if (DefMI->getParent() != MI.getParent())

  DefMI->eraseFromParent();
  if (MI.isMetaInstruction())

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),

  if (!MI.memoperands_empty()) {
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))

  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STATEPOINT: {
    return std::max(NumBytes, 8U);
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      return (STI.hasStdExtZca() ? 2 : 4) * Num;
    return STI.is64Bit() ? 68 : 44;
    return get(Opcode).getSize();
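// Illustrative example of the sizing logic above: with Zca a compressible
// instruction measures 2 bytes; everything else falls back to the
// MCInstrDesc size, roughly:
//
//   unsigned Size = (STI.hasStdExtZca() && isCompressibleInst(MI, STI))
//                       ? 2
//                       : get(MI.getOpcode()).getSize();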
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
  const unsigned Opcode = MI.getOpcode();
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  return MI.isAsCheapAsAMove();
std::optional<DestSourcePair>
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
  return std::nullopt;
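// Illustrative summary of the copy shapes recognized above (hedged MIR
// sketches; destination is operand 0, source the named operand):
//
//   ADDI  rd, rs, 0       -> copy rs -> rd
//   ADD   rd, x0, rs      -> copy rs -> rd
//   FSGNJ rd, rs, rs      -> copy rs -> rd (FP move)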
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())

      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
    return RISCV::getNamedOperandIdx(MI->getOpcode(), RISCV::OpName::frm) < 0;
         "New instructions require FRM whereas the old one does not have it");

  for (auto *NewMI : InsInstrs) {
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC) \
  case RISCV::OPC##_M1: \
  case RISCV::OPC##_M2: \
  case RISCV::OPC##_M4: \
  case RISCV::OPC##_M8: \
  case RISCV::OPC##_MF2: \
  case RISCV::OPC##_MF4: \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC) \
  case RISCV::OPC##_M1_MASK: \
  case RISCV::OPC##_M2_MASK: \
  case RISCV::OPC##_M4_MASK: \
  case RISCV::OPC##_M8_MASK: \
  case RISCV::OPC##_MF2_MASK: \
  case RISCV::OPC##_MF4_MASK: \
  case RISCV::OPC##_MF8_MASK

    Opcode = *InvOpcode;

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
  const uint64_t TSFlags = Desc.TSFlags;

  auto checkImmOperand = [&](unsigned OpIdx) {
  auto checkRegOperand = [&](unsigned OpIdx) {
  if (!checkRegOperand(1))

  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {
    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();
      if (MI1VReg != SrcReg)

  assert(SeenMI2 && "Prev is expected to appear before Root");
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
         "Expect the presence of a passthrough operand.");

  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&

  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))

  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  for (unsigned I = 0; I < 5; ++I)

                                             bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);

  unsigned OperandIdx = Commuted ? 2 : 1;

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
                                       bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    Opc = *InverseOpcode;

std::optional<unsigned>
#define RVV_OPC_LMUL_CASE(OPC, INV) \
  case RISCV::OPC##_M1: \
    return RISCV::INV##_M1; \
  case RISCV::OPC##_M2: \
    return RISCV::INV##_M2; \
  case RISCV::OPC##_M4: \
    return RISCV::INV##_M4; \
  case RISCV::OPC##_M8: \
    return RISCV::INV##_M8; \
  case RISCV::OPC##_MF2: \
    return RISCV::INV##_MF2; \
  case RISCV::OPC##_MF4: \
    return RISCV::INV##_MF4; \
  case RISCV::OPC##_MF8: \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
  case RISCV::OPC##_M1_MASK: \
    return RISCV::INV##_M1_MASK; \
  case RISCV::OPC##_M2_MASK: \
    return RISCV::INV##_M2_MASK; \
  case RISCV::OPC##_M4_MASK: \
    return RISCV::INV##_M4_MASK; \
  case RISCV::OPC##_M8_MASK: \
    return RISCV::INV##_M8_MASK; \
  case RISCV::OPC##_MF2_MASK: \
    return RISCV::INV##_MF2_MASK; \
  case RISCV::OPC##_MF4_MASK: \
    return RISCV::INV##_MF4_MASK; \
  case RISCV::OPC##_MF8_MASK: \
    return RISCV::INV##_MF8_MASK

    return std::nullopt;
    return RISCV::FSUB_H;
    return RISCV::FSUB_S;
    return RISCV::FSUB_D;
    return RISCV::FADD_H;
    return RISCV::FADD_S;
    return RISCV::FADD_D;

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
                         bool DoRegPressureReduce) {
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                      bool DoRegPressureReduce) {
                            DoRegPressureReduce)) {
                            DoRegPressureReduce)) {

                                  bool DoRegPressureReduce) {

                         unsigned CombineOpc) {
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                               unsigned OuterShiftAmt) {
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)

  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
                                            bool DoRegPressureReduce) const {
                               DoRegPressureReduce);

    return RISCV::FMADD_H;
    return RISCV::FMADD_S;
    return RISCV::FMADD_D;

  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();

      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)

  assert(OuterShiftAmt != 0 && "Unexpected opcode");

  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  switch (InnerShiftAmt - OuterShiftAmt) {
    InnerOpc = RISCV::ADD;
    InnerOpc = RISCV::SH1ADD;
    InnerOpc = RISCV::SH2ADD;
    InnerOpc = RISCV::SH3ADD;

  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
                       DelInstrs, InstrIdxForVirtReg);
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;
      ErrInfo = "Expected a non-register operand.";
      int64_t Imm = MO.getImm();

#define CASE_OPERAND_UIMM(NUM) \
  case RISCVOp::OPERAND_UIMM##NUM: \
    Ok = isUInt<NUM>(Imm); \
    break;
#define CASE_OPERAND_SIMM(NUM) \
  case RISCVOp::OPERAND_SIMM##NUM: \
    Ok = isInt<NUM>(Imm); \
    break;

        Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);
        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;
        Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
        Ok = Ok && Imm != 0;
             (Imm >= 0xfffe0 && Imm <= 0xfffff);
        Ok = Imm >= 0 && Imm <= 10;
        Ok = Imm >= 0 && Imm <= 7;
        Ok = Imm >= 1 && Imm <= 10;
        Ok = Imm >= 2 && Imm <= 14;
        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
        ErrInfo = "Invalid immediate";
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
    if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
      ErrInfo = "VL operand w/o SEW operand?";
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      ErrInfo = "Unexpected SEW value";
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
      ErrInfo = "Unexpected SEW value";
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      ErrInfo = "Invalid Policy Value";
      ErrInfo = "policy operand w/o VL operand?";
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
        !MI.readsRegister(RISCV::FRM, /*TRI=*/nullptr)) {
      ErrInfo = "dynamic rounding mode should read FRM";
  case RISCV::LD_RV32:
  case RISCV::SD_RV32:

  int64_t NewOffset = OldOffset + Disp;
         "Addressing mode not supported for folding");

  case RISCV::LD_RV32:
  case RISCV::SD_RV32:
  OffsetIsScalable = false;

  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

  if (MO1->getAddrSpace() != MO2->getAddrSpace())
  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
  return Base1 == Base2;

                                           int64_t Offset2,
                                           bool OffsetIsScalable2,
                                           unsigned ClusterSize,
                                           unsigned NumBytes) const {
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
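// Illustrative reading of the clustering test above: at most four memory ops
// per cluster, and only when the two offsets provably land within one cache
// line of each other.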
  int64_t OffsetA = 0, OffsetB = 0;
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
std::pair<unsigned, unsigned>
  return std::make_pair(TF & Mask, TF & ~Mask);

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())

                                       unsigned &Flags) const {

  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");

  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);

  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);

  if (!MBB.back().isReturn())

  if (C.back().isReturn()) {
           "The candidate who uses return instruction must be outlined "

  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;

  unsigned InstrSizeCExt =
  unsigned CallOverhead = 0, FrameOverhead = 0;
    CallOverhead = 4 + InstrSizeCExt;
    FrameOverhead = InstrSizeCExt;

  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
                                                      unsigned Flags) const {
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  if (MI.isCFIInstruction())

  for (const auto &MO : MI.operands()) {
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))

  auto I = MBB.begin();
  for (; I != E; ++I) {
    if (I->isCFIInstruction()) {
      I->removeFromParent();

  MBB.addLiveIn(RISCV::X5);

      .addGlobalAddress(M.getNamedValue(MF.getName()),
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,

  return std::nullopt;
  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;

  std::string GenericComment =
  if (!GenericComment.empty())
    return GenericComment;

    return std::string();
    return std::string();
  std::string Comment;
  switch (OpInfo.OperandType) {
    unsigned Imm = Op.getImm();
    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    unsigned Policy = Op.getImm();
           "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP) \
  CASE_RVV_OPCODE_MASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP) \
  CASE_RVV_OPCODE_UNMASK(OP): \
  case CASE_RVV_OPCODE_MASK(OP)
#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  if (!Desc.isCommutable())

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    if (MI.getOperand(2).getReg() == RISCV::X0)
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,

    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)

    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        CommutableOpIdx1 = SrcOpIdx2;
      }

      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        CommutableOpIdx2 = 1;
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
          CommutableOpIdx2 = 3;
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
                                                     unsigned OpIdx2) const {
    return *MI.getParent()->getParent()->CloneMachineInstr(&MI);

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    switch (MI.getOpcode()) {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      switch (MI.getOpcode()) {
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {
        MI.setDesc(get(RISCV::SLLI_UW));
      MI.setDesc(get(RISCV::SLLI));
    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
  case RISCV::SLLI_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP) \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL; \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)
  switch (MI.getOpcode()) {
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    switch (MI.getOpcode()) {
            .add(MI.getOperand(0))
            .add(MI.getOperand(1))
            .add(MI.getOperand(2))
            .add(MI.getOperand(3))
            .add(MI.getOperand(4))
            .add(MI.getOperand(5))
            .add(MI.getOperand(6));

           MI.getNumExplicitOperands() == 6);
    switch (MI.getOpcode()) {
            .add(MI.getOperand(0))
            .add(MI.getOperand(1))
            .add(MI.getOperand(2))
            .add(MI.getOperand(3))
            .add(MI.getOperand(4))
            .add(MI.getOperand(5));

  unsigned NumOps = MI.getNumOperands();
    if (Op.isReg() && Op.isKill())

  if (MI.getOperand(0).isEarlyClobber()) {
  if (ShiftAmount == 0)
  } else if (STI.hasShlAdd(3) &&
    if (Amount % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(Amount / 9);
    } else if (Amount % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(Amount / 5);
    } else if (Amount % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(Amount / 3);
    }

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
            .addImm(ShiftAmount - PrevShiftAmount)
        if (Amount >> (ShiftAmount + 1)) {
          Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        PrevShiftAmount = ShiftAmount;

    assert(Acc && "Expected valid accumulator");
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =

             ? STI.getTailDupAggressiveThreshold()

  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
std::optional<std::pair<unsigned, unsigned>>
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
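// Illustrative note: the (NF, LMUL) pairs above size the segment spill and
// reload expansions; PseudoVSPILL<NF>_M<LMUL> touches NF * LMUL vector
// registers, e.g. PseudoVSPILL3_M2 stores 3 groups of 2 registers each.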
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
std::optional<unsigned>
    return std::nullopt;
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  case RISCV::VWSLL_VX:
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  case RISCV::VAND_VX:
  case RISCV::VXOR_VX:
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  case RISCV::VMERGE_VXM:
  case RISCV::VMV_V_X:
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  case RISCV::VSMUL_VX:
  case RISCV::VMV_S_X:
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
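// Hedged reading of the case list above: it answers how many low bits of the
// scalar (rs1) operand a .vx instruction actually consumes. Shifts and
// rotates only read about log2(SEW) bits of the shift amount, narrowing
// shifts roughly one bit more, and plain arithmetic such as VADD_VX reads a
// full SEW bits (1U << Log2SEW).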
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}
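
// Example (illustrative): an RVV pseudo such as PseudoVADD_VV_M1 maps back
// to its underlying MC opcode VADD_VV via the searchable pseudo table;
// opcodes with no table entry return 0.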
unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // A DestEEW field of 0 encodes EEW = 1 (mask-producing instructions).
  if (DestEEW == 0)
    return 0;
  // Otherwise the destination EEW is 2^(DestEEW - 1) * SEW.
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(Scaled >= 3 && Scaled <= 6);
  return Scaled;
}
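
// Worked example (illustrative, following the encoding above): a 2x
// widening op with SEW = 16 (Log2SEW = 4) carries DestEEW = 2, so
// Scaled = 4 + (2 - 1) = 5, i.e. a destination EEW of 32 bits.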
// Fold an operand to a compile-time constant if possible: either a literal
// immediate, or a virtual register defined by a load-immediate.
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (MO.isImm())
    return MO.getImm();
  if (MO.isReg() && MO.getReg().isVirtual()) {
    const MachineRegisterInfo &MRI = MO.getParent()->getMF()->getRegInfo();
    int64_t Imm;
    if (isLoadImm(MRI.getVRegDef(MO.getReg()), Imm))
      return Imm;
  }
  return std::nullopt;
}

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // VLMAX is the largest possible VL.
  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;
  // A VL of zero is trivially <= any other VL.
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
    return false;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS),
                         RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
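
// Example (illustrative): a VL of 0 is known <= any VL, any VL is known
// <= VLMAX (VLMaxSentinel), and two uses of the same virtual register
// compare <= without knowing the runtime value.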
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *LHS;
  MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(MachineInstr *LHS, MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Make the instructions for loop control be placed in stage 0.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is already normalized for that use.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
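
// The MachinePipeliner queries this hook object to keep the loop-control
// compare (LHS/RHS defs) in stage 0 and to materialize the guard condition
// for the generated prologue/epilogue from Cond.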
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Cond holds [CondCode, LHS, RHS]; find the defining instructions of the
  // two compared registers so the pipeliner can pin them to stage 0.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };

  MachineInstr *LHS = FindRegDef(Cond[1]);
  MachineInstr *RHS = FindRegDef(Cond[2]);
  // Comparison inputs produced by PHIs change across iterations in a way
  // the pipeliner cannot reason about, so bail out on them.
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
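
// Illustrative shape of a loop this analysis accepts (assumed example): a
// single-basic-block loop whose backedge is a conditional branch on two
// registers not defined by PHIs, e.g.
//   loop:
//     ...
//     %i.next = ADDI %i, -1
//     BNE %i.next, %n, loop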
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Integer div/rem.
  case RISCV::DIV:
  case RISCV::DIVU:
  case RISCV::DIVW:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMU:
  case RISCV::REMW:
  case RISCV::REMUW:
  // Floating-point div/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer div/rem.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector floating-point div/sqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
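
// These opcodes (scalar and vector divide, remainder, and square-root
// families) typically execute on iterative, many-cycle units, so the
// scheduler and machine combiner treat their results as high-latency defs.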