#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
61 cl::init(MachineTraceStrategy::TS_NumStrategies),
64 clEnumValN(MachineTraceStrategy::TS_MinInstrCount,
"min-instr",
65 "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
  if (STI.hasStdExtZca())

                                               int &FrameIndex) const {
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
  switch (MI.getOpcode()) {
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
    if (!MI.getOperand(1).isFI())
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
  case RISCV::VFMV_V_F:
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();
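// NOTE (editorial comment, not from the original source): the predicate below
// appears to test whether a forward (low-to-high) copy of an NF-register tuple
// would overwrite source registers that have not been read yet, i.e. whether
// DstReg falls inside the half-open range [SrcReg, SrcReg + NumRegs).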
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
    if (MBBI->isMetaInstruction())
    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
      unsigned FirstVType = MBBI->getOperand(2).getImm();
        if (FirstLMul != LMul)
      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
      unsigned VType = MBBI->getOperand(2).getImm();
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
    } else if (MBBI->getNumDefs()) {
      if (MBBI->modifiesRegister(RISCV::VL, nullptr))
        if (!MO.isReg() || !MO.isDef())
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
        if (MO.getReg() != SrcReg)
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  assert(!Fractional && "Fractional LMUL is impossible here.");
  unsigned NumRegs = NF * LMulVal;
    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;
                                   unsigned, unsigned> {
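    // NOTE (editorial reading of the fragment below, not original text): each
    // step tries to copy with the widest pseudo available -- M8, then M4, then
    // M2, falling back to M1 -- and a width is only usable when enough
    // registers remain in the group and both the source and destination
    // encodings are aligned to that width (the reversed, high-to-low copy
    // additionally requires the remaining distance Diff to cover it).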
    uint16_t Diff = DstEncoding - SrcEncoding;
    if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
        DstEncoding % 8 == 7)
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
        DstEncoding % 4 == 3)
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
        DstEncoding % 2 == 1)
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
            RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
            RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
            RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
          RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
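  // NOTE (editorial, based on the visible fragment): the loop below walks the
  // register group, asks GetCopyInfo for the widest copy that is legal at the
  // current position, emits it, and then advances (or retreats, for a reversed
  // copy) by the number of registers just copied.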
  while (I != NumRegs) {
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    if (LMul == LMulCopied &&
      if (DefMBBI->getOpcode() == VIOpc)
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
        MIB = MIB.add(DefMBBI->getOperand(2));
      MIB.addImm(Log2SEW ? Log2SEW : 3);
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
                                 bool RenamableDest, bool RenamableSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
      (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
          "Unexpected extensions");
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
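  // NOTE (editorial summary of the selection table below, not original text):
  // the spill opcode is chosen purely from the register class -- scalar
  // GPR/FPR classes use ordinary store instructions, single vector registers
  // and register groups use whole-register stores (VS1R_V .. VS8R_V), and
  // segment/tuple classes use the PseudoVSPILL<NF>_M<LMUL> pseudos.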
                                         Register SrcReg, bool IsKill, int FI,
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
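  // NOTE (editorial): reload opcode selection mirrors the spill table above,
  // substituting loads for stores (LW/LD, the VL<N>RE8_V whole-register loads,
  // and the PseudoVRELOAD<NF>_M<LMUL> pseudos).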
  if (Ops.size() != 1 || Ops[0] != 1)

  switch (MI.getOpcode()) {
    if (RISCVInstrInfo::isSEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_B(MI))
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  case RISCV::VMV_X_S: {
    if (ST.getXLen() < (1U << Log2SEW))
  case RISCV::VFMV_F_S: {

  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),

                               bool DstIsDead) const {
  if (!isUInt<32>(Val))
    Val = SignExtend64<32>(Val);
  bool SrcRenamable = false;
    bool LastItem = ++Num == Seq.size();
    switch (Inst.getOpndKind()) {
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
    SrcRenamable = DstRenamable;
  case RISCV::CV_BEQIMM:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BEQC:
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BNEC:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
                 "Unknown conditional branch");
  switch (SelectOpc) {
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
      return RISCV::CV_BEQIMM;
      return RISCV::CV_BNEIMM;
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
      return RISCV::QC_BEQI;
      return RISCV::QC_BNEI;
      return RISCV::QC_BLTI;
      return RISCV::QC_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
      return RISCV::QC_BLTUI;
      return RISCV::QC_BGEUI;
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
      return RISCV::QC_E_BEQI;
      return RISCV::QC_E_BNEI;
      return RISCV::QC_E_BLTI;
      return RISCV::QC_E_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
      return RISCV::QC_E_BLTUI;
      return RISCV::QC_E_BGEUI;
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
      return RISCV::NDS_BBC;
      return RISCV::NDS_BBS;
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
      return RISCV::NDS_BEQC;
      return RISCV::NDS_BNEC;
                                   bool AllowModify) const {
  TBB = FBB = nullptr;

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {

  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
    I = FirstUncondOrIndirectBr;

  if (I->getDesc().isIndirectBranch())

  if (I->isPreISelOpcode())

  if (NumTerminators > 2)

  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
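  // NOTE (editorial summary of the cases above): with at most two terminators
  // the block is analyzable -- a lone unconditional branch, a lone conditional
  // branch that falls through, or a conditional branch followed by an
  // unconditional one; anything else is reported as unanalyzable.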
                                      int *BytesRemoved) const {
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
  I->eraseFromParent();

  if (!I->getDesc().isConditionalBranch())
  I->eraseFromParent();
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "RISC-V branch conditions have two components!");

  assert(RS && "RegScavenger required for long branching");
         "new block should be inserted for expanding unconditional branch");
         "restore block should be inserted for restoring clobbered registers");

  if (!isInt<32>(BrOffset))
        "Branch offsets outside of the signed 32-bit range not supported");

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  if (TmpGPR != RISCV::NoRegister)
    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
    if (FrameIndex == -1)
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
    MI.getOperand(1).setMBB(&RestoreBB);
    TRI->eliminateFrameIndex(RestoreBB.back(),
  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");
    Cond[0].setImm(RISCV::BNE);
    Cond[0].setImm(RISCV::BEQ);
    Cond[0].setImm(RISCV::BGE);
    Cond[0].setImm(RISCV::BLT);
    Cond[0].setImm(RISCV::BGEU);
    Cond[0].setImm(RISCV::BLTU);
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
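  // NOTE (editorial): each conditional branch, including the vendor ones, is
  // replaced by its logical inverse (eq <-> ne, ge <-> lt, bit-clear <->
  // bit-set), so reversing a condition never changes the operands, only the
  // opcode stored in Cond[0].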
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();

  if (Reg == RISCV::X0) {
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);

  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {
  MI.eraseFromParent();

  auto searchConst = [&](int64_t C1) -> Register {
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
             I.getOperand(0).getReg().isVirtual();
      return DefC1->getOperand(0).getReg();

      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    assert(isInt<12>(C0) && "Unexpected immediate");
    if (Register RegZ = searchConst(C0 + 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

      MRI.hasOneUse(RHS.getReg())) {
    assert(isInt<12>(C0) && "Unexpected immediate");
    if (Register RegZ = searchConst(C0 - 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
                                           int64_t BrOffset) const {
  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:
    return isInt<11>(BrOffset);
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
    return isInt<13>(BrOffset);
  case RISCV::PseudoBR:
    return isInt<21>(BrOffset);
  case RISCV::PseudoJump:
    return isInt<32>(SignExtend64(BrOffset + 0x800, XLen));
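  // NOTE (editorial): the reachable ranges follow the encodings -- 11-bit
  // offsets for the short Andes branches, 13 bits for the vendor conditional
  // branches above, 21 bits for the unconditional jal behind PseudoBR, and a
  // full signed 32-bit range for the auipc+jalr pair behind PseudoJump (the
  // +0x800 accounts for auipc's rounding of the hi20 part).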
  case RISCV::ADD:   return RISCV::PseudoCCADD;   break;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;   break;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;   break;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;   break;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;   break;
  case RISCV::AND:   return RISCV::PseudoCCAND;   break;
  case RISCV::OR:    return RISCV::PseudoCCOR;    break;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;   break;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;  break;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;  break;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;  break;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;  break;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;  break;
  case RISCV::ORI:   return RISCV::PseudoCCORI;   break;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;  break;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;  break;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;  break;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;  break;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;  break;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;  break;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW; break;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW; break;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW; break;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW; break;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;  break;
  case RISCV::ORN:   return RISCV::PseudoCCORN;   break;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;  break;

  case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS; break;
  case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ; break;

  return RISCV::INSTRUCTION_LIST_END;
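// NOTE (editorial): this table maps plain ALU opcodes to their PseudoCC*
// counterparts, the conditional forms used by the short-forward-branch
// optimization; INSTRUCTION_LIST_END signals that an opcode has no predicated
// equivalent.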
  if (!Reg.isVirtual())
  if (!MRI.hasOneNonDBGUse(Reg))

  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)

    if (MO.isFI() || MO.isCPI() || MO.isJTI())
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
                                       unsigned &TrueOp, unsigned &FalseOp,
                                       bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  Optimizable = STI.hasShortForwardBranchOpt();

                                            bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())

  bool Invert = !DefMI;

  Register DestReg = MI.getOperand(0).getReg();
  if (!MRI.constrainRegClass(DestReg, PreviousClass))

  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  NewMI.add(FalseReg);
  if (MI.isMetaInstruction())

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),

  if (!MI.memoperands_empty()) {
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))

  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STATEPOINT: {
    return std::max(NumBytes, 8U);
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      if (F.getFnAttribute("patchable-function-entry")
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      return (STI.hasStdExtZca() ? 2 : 4) * Num;

  return get(Opcode).getSize();
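// NOTE (editorial): instruction sizes default to the MCInstrDesc size; an
// instruction that can be compressed under Zca reports 2 bytes instead of 4,
// and patchable-function-entry sites scale with the requested NOP count.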
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");

  const unsigned Opcode = MI.getOpcode();
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  return MI.isAsCheapAsAMove();
std::optional<DestSourcePair>
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
  return std::nullopt;
1981 const auto &SchedModel =
STI.getSchedModel();
1982 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1994 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
1998 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
1999 RISCV::OpName::frm) < 0;
2001 "New instructions require FRM whereas the old one does not have it");
2008 for (
auto *NewMI : InsInstrs) {
2010 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2011 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2053bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2054 bool Invert)
const {
2055#define OPCODE_LMUL_CASE(OPC) \
2056 case RISCV::OPC##_M1: \
2057 case RISCV::OPC##_M2: \
2058 case RISCV::OPC##_M4: \
2059 case RISCV::OPC##_M8: \
2060 case RISCV::OPC##_MF2: \
2061 case RISCV::OPC##_MF4: \
2062 case RISCV::OPC##_MF8
2064#define OPCODE_LMUL_MASK_CASE(OPC) \
2065 case RISCV::OPC##_M1_MASK: \
2066 case RISCV::OPC##_M2_MASK: \
2067 case RISCV::OPC##_M4_MASK: \
2068 case RISCV::OPC##_M8_MASK: \
2069 case RISCV::OPC##_MF2_MASK: \
2070 case RISCV::OPC##_MF4_MASK: \
2071 case RISCV::OPC##_MF8_MASK
2076 Opcode = *InvOpcode;
2093#undef OPCODE_LMUL_MASK_CASE
2094#undef OPCODE_LMUL_CASE
2097bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2110 auto checkImmOperand = [&](
unsigned OpIdx) {
2114 auto checkRegOperand = [&](
unsigned OpIdx) {
2122 if (!checkRegOperand(1))
2137 bool SeenMI2 =
false;
2147 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2148 Register SrcReg = It->getOperand(1).getReg();
2166 if (MI1VReg != SrcReg)
2175 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2214bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2215 bool &Commuted)
const {
2219 "Expect the present of passthrough operand.");
2225 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2226 areRVVInstsReassociable(Inst, *MI2);
2230 return areRVVInstsReassociable(Inst, *MI1) &&
2231 (isVectorAssociativeAndCommutative(*MI1) ||
2232 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2239 if (!isVectorAssociativeAndCommutative(Inst) &&
2240 !isVectorAssociativeAndCommutative(Inst,
true))
2252 MI1 =
MRI.getUniqueVRegDef(Op1.
getReg());
2254 MI2 =
MRI.getUniqueVRegDef(Op2.
getReg());
2266 for (
unsigned I = 0;
I < 5; ++
I)
2272 bool &Commuted)
const {
2273 if (isVectorAssociativeAndCommutative(Inst) ||
2274 isVectorAssociativeAndCommutative(Inst,
true))
2275 return hasReassociableVectorSibling(Inst, Commuted);
2281 unsigned OperandIdx = Commuted ? 2 : 1;
2285 int16_t InstFrmOpIdx =
2286 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2287 int16_t SiblingFrmOpIdx =
2288 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2290 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2295 bool Invert)
const {
2296 if (isVectorAssociativeAndCommutative(Inst, Invert))
2304 Opc = *InverseOpcode;
2349std::optional<unsigned>
2351#define RVV_OPC_LMUL_CASE(OPC, INV) \
2352 case RISCV::OPC##_M1: \
2353 return RISCV::INV##_M1; \
2354 case RISCV::OPC##_M2: \
2355 return RISCV::INV##_M2; \
2356 case RISCV::OPC##_M4: \
2357 return RISCV::INV##_M4; \
2358 case RISCV::OPC##_M8: \
2359 return RISCV::INV##_M8; \
2360 case RISCV::OPC##_MF2: \
2361 return RISCV::INV##_MF2; \
2362 case RISCV::OPC##_MF4: \
2363 return RISCV::INV##_MF4; \
2364 case RISCV::OPC##_MF8: \
2365 return RISCV::INV##_MF8
2367#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2368 case RISCV::OPC##_M1_MASK: \
2369 return RISCV::INV##_M1_MASK; \
2370 case RISCV::OPC##_M2_MASK: \
2371 return RISCV::INV##_M2_MASK; \
2372 case RISCV::OPC##_M4_MASK: \
2373 return RISCV::INV##_M4_MASK; \
2374 case RISCV::OPC##_M8_MASK: \
2375 return RISCV::INV##_M8_MASK; \
2376 case RISCV::OPC##_MF2_MASK: \
2377 return RISCV::INV##_MF2_MASK; \
2378 case RISCV::OPC##_MF4_MASK: \
2379 return RISCV::INV##_MF4_MASK; \
2380 case RISCV::OPC##_MF8_MASK: \
2381 return RISCV::INV##_MF8_MASK
2385 return std::nullopt;
2387 return RISCV::FSUB_H;
2389 return RISCV::FSUB_S;
2391 return RISCV::FSUB_D;
2393 return RISCV::FADD_H;
2395 return RISCV::FADD_S;
2397 return RISCV::FADD_D;
2414#undef RVV_OPC_LMUL_MASK_CASE
2415#undef RVV_OPC_LMUL_CASE
2420 bool DoRegPressureReduce) {
2436 if (DoRegPressureReduce && !
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2447 bool DoRegPressureReduce) {
2454 DoRegPressureReduce)) {
2460 DoRegPressureReduce)) {
2470 bool DoRegPressureReduce) {
2478 unsigned CombineOpc) {
2485 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2488 if (!
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2499 unsigned OuterShiftAmt) {
2505 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2532 case RISCV::SH1ADD_UW:
2534 case RISCV::SH2ADD_UW:
2536 case RISCV::SH3ADD_UW:
2582 bool DoRegPressureReduce)
const {
2591 DoRegPressureReduce);
2599 return RISCV::FMADD_H;
2601 return RISCV::FMADD_S;
2603 return RISCV::FMADD_D;
2648 bool Mul1IsKill = Mul1.
isKill();
2649 bool Mul2IsKill = Mul2.
isKill();
2650 bool AddendIsKill = Addend.
isKill();
2659 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2684 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2691 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2694 switch (InnerShiftAmt - OuterShiftAmt) {
2698 InnerOpc = RISCV::ADD;
2701 InnerOpc = RISCV::SH1ADD;
2704 InnerOpc = RISCV::SH2ADD;
2707 InnerOpc = RISCV::SH3ADD;
2715 Register NewVR =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
2725 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2742 DelInstrs, InstrIdxForVirtReg);
2769 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2770 unsigned OpType = Operand.OperandType;
2775 ErrInfo =
"Expected a non-register operand.";
2779 int64_t Imm = MO.
getImm();
2786#define CASE_OPERAND_UIMM(NUM) \
2787 case RISCVOp::OPERAND_UIMM##NUM: \
2788 Ok = isUInt<NUM>(Imm); \
2790#define CASE_OPERAND_SIMM(NUM) \
2791 case RISCVOp::OPERAND_SIMM##NUM: \
2792 Ok = isInt<NUM>(Imm); \
2812 Ok = isShiftedUInt<1, 1>(Imm);
2815 Ok = isShiftedUInt<4, 1>(Imm);
2818 Ok = isUInt<5>(Imm) && (Imm != 0);
2821 Ok = isUInt<5>(Imm) && (Imm > 3);
2824 Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);
2827 Ok = isShiftedUInt<5, 1>(Imm);
2830 Ok = isShiftedUInt<5, 2>(Imm);
2833 Ok = isShiftedUInt<4, 3>(Imm);
2836 Ok = isShiftedUInt<6, 2>(Imm);
2839 Ok = isShiftedUInt<5, 3>(Imm);
2842 Ok = isUInt<8>(Imm) && Imm >= 32;
2845 Ok = isShiftedUInt<6, 3>(Imm);
2848 Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
2851 Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
2854 Ok = isUInt<16>(Imm) && (Imm != 0);
2870 Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
2873 Ok = isInt<5>(Imm) && (Imm != 0);
2876 Ok = Imm != 0 && isInt<6>(Imm);
2879 Ok = isUInt<10>(Imm);
2882 Ok = isUInt<11>(Imm);
2885 Ok = isShiftedInt<7, 5>(Imm);
2888 Ok = isInt<16>(Imm) && (Imm != 0);
2891 Ok = isInt<20>(Imm);
2894 Ok = isInt<32>(Imm);
2897 Ok =
STI.
is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2900 Ok =
STI.
is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2901 Ok = Ok && Imm != 0;
2904 Ok = (isUInt<5>(Imm) && Imm != 0) ||
2905 (Imm >= 0xfffe0 && Imm <= 0xfffff);
2908 Ok = Imm >= 0 && Imm <= 10;
2911 Ok = Imm >= 0 && Imm <= 7;
2914 Ok = Imm >= 1 && Imm <= 10;
2917 Ok = Imm >= 2 && Imm <= 14;
2926 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
2950 Ok = isUInt<2>(Imm);
2956 ErrInfo =
"Invalid immediate";
2966 if (!
Op.isImm() && !
Op.isReg()) {
2967 ErrInfo =
"Invalid operand type for VL operand";
2970 if (
Op.isReg() &&
Op.getReg() != RISCV::NoRegister) {
2972 auto *RC =
MRI.getRegClass(
Op.getReg());
2973 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
2974 ErrInfo =
"Invalid register class for VL operand";
2979 ErrInfo =
"VL operand w/o SEW operand?";
2985 if (!
MI.getOperand(
OpIdx).isImm()) {
2986 ErrInfo =
"SEW value expected to be an immediate";
2991 ErrInfo =
"Unexpected SEW value";
2994 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2996 ErrInfo =
"Unexpected SEW value";
3002 if (!
MI.getOperand(
OpIdx).isImm()) {
3003 ErrInfo =
"Policy operand expected to be an immediate";
3008 ErrInfo =
"Invalid Policy Value";
3012 ErrInfo =
"policy operand w/o VL operand?";
3020 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3021 ErrInfo =
"policy operand w/o tied operand?";
3028 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3029 ErrInfo =
"dynamic rounding mode should read FRM";
3051 case RISCV::LD_RV32:
3061 case RISCV::SD_RV32:
3077 int64_t NewOffset = OldOffset + Disp;
3079 NewOffset = SignExtend64<32>(NewOffset);
3081 if (!isInt<12>(NewOffset))
3099 "Addressing mode not supported for folding");
3173 case RISCV::LD_RV32:
3176 case RISCV::SD_RV32:
3183 OffsetIsScalable =
false;
3199 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3207 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3210 auto Base1 = MO1->getValue();
3211 auto Base2 = MO2->getValue();
3212 if (!Base1 || !Base2)
3217 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
3220 return Base1 == Base2;
3226 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3227 unsigned NumBytes)
const {
3230 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3235 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3241 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3247 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3297 int64_t OffsetA = 0, OffsetB = 0;
3303 int LowOffset = std::min(OffsetA, OffsetB);
3304 int HighOffset = std::max(OffsetA, OffsetB);
3305 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3307 LowOffset + (int)LowWidth.
getValue() <= HighOffset)
std::pair<unsigned, unsigned>
  return std::make_pair(TF & Mask, TF & ~Mask);

  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
3346 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3359 unsigned &Flags)
const {
3378 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3379 F.hasFnAttribute(
"patchable-function-entry");
3384 return MI.readsRegister(RegNo,
TRI) ||
3385 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3390 return MI.modifiesRegister(RegNo,
TRI) ||
3391 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3418 if (
C.back().isReturn()) {
3420 "The candidate who uses return instruction must be outlined "
3433 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *
TRI);
3436std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3439 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3440 unsigned MinRepeats)
const {
3446 if (RepeatedSequenceLocs.size() < MinRepeats)
3447 return std::nullopt;
3451 unsigned InstrSizeCExt =
3453 unsigned CallOverhead = 0, FrameOverhead = 0;
3460 CallOverhead = 4 + InstrSizeCExt;
3467 FrameOverhead = InstrSizeCExt;
3470 for (
auto &
C : RepeatedSequenceLocs)
3471 C.setCallInfo(MOCI, CallOverhead);
3473 unsigned SequenceSize = 0;
3474 for (
auto &
MI : Candidate)
3477 return std::make_unique<outliner::OutlinedFunction>(
3478 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3484 unsigned Flags)
const {
3489 const auto &
F =
MI.getMF()->getFunction();
3492 if (
MI.isCFIInstruction())
3504 for (
const auto &MO :
MI.operands()) {
3509 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3510 F.hasSection() ||
F.getSectionPrefix()))
3522 bool Changed =
true;
3527 for (;
I != E; ++
I) {
3528 if (
I->isCFIInstruction()) {
3529 I->removeFromParent();
3554 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3562 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3573 return std::nullopt;
3577 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3578 MI.getOperand(2).isImm())
3579 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3581 return std::nullopt;
3589 std::string GenericComment =
3591 if (!GenericComment.empty())
3592 return GenericComment;
3596 return std::string();
3600 return std::string();
3602 std::string Comment;
3612 unsigned Imm =
Op.getImm();
3618 unsigned Log2SEW =
Op.getImm();
3619 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3625 unsigned Policy =
Op.getImm();
3627 "Invalid Policy Value");
3637#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3638 RISCV::Pseudo##OP##_##LMUL
3640#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3641 RISCV::Pseudo##OP##_##LMUL##_MASK
3643#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3644 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3645 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3647#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3648 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3649 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3650 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3651 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3652 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3653 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3655#define CASE_RVV_OPCODE_UNMASK(OP) \
3656 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3657 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3659#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3660 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3661 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3662 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3663 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3664 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3665 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3667#define CASE_RVV_OPCODE_MASK(OP) \
3668 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3669 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3671#define CASE_RVV_OPCODE_WIDEN(OP) \
3672 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3673 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3675#define CASE_RVV_OPCODE(OP) \
3676 CASE_RVV_OPCODE_UNMASK(OP): \
3677 case CASE_RVV_OPCODE_MASK(OP)
3681#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
3682 RISCV::PseudoV##OP##_##TYPE##_##LMUL
3684#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
3685 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
3686 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
3687 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
3688 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
3689 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
3690 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
3691 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
3694#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
3695 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
3697#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
3698 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
3699 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
3700 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
3701 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
3703#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
3704 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
3705 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
3707#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
3708 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
3709 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
3711#define CASE_VFMA_OPCODE_VV(OP) \
3712 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
3713 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
3714 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
3716#define CASE_VFMA_SPLATS(OP) \
3717 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
3718 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
3719 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3723 unsigned &SrcOpIdx1,
3724 unsigned &SrcOpIdx2)
const {
3726 if (!
Desc.isCommutable())
3729 switch (
MI.getOpcode()) {
3730 case RISCV::TH_MVEQZ:
3731 case RISCV::TH_MVNEZ:
3735 if (
MI.getOperand(2).getReg() == RISCV::X0)
3738 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3739 case RISCV::TH_MULA:
3740 case RISCV::TH_MULAW:
3741 case RISCV::TH_MULAH:
3742 case RISCV::TH_MULS:
3743 case RISCV::TH_MULSW:
3744 case RISCV::TH_MULSH:
3746 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3747 case RISCV::PseudoCCMOVGPRNoX0:
3748 case RISCV::PseudoCCMOVGPR:
3750 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3777 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3804 unsigned CommutableOpIdx1 = 1;
3805 unsigned CommutableOpIdx2 = 3;
3806 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3827 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3829 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3833 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3834 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3840 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3841 SrcOpIdx2 == CommuteAnyOperandIndex) {
3844 unsigned CommutableOpIdx1 = SrcOpIdx1;
3845 if (SrcOpIdx1 == SrcOpIdx2) {
3848 CommutableOpIdx1 = 1;
3849 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3851 CommutableOpIdx1 = SrcOpIdx2;
3856 unsigned CommutableOpIdx2;
3857 if (CommutableOpIdx1 != 1) {
3859 CommutableOpIdx2 = 1;
3861 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
3866 if (Op1Reg !=
MI.getOperand(2).getReg())
3867 CommutableOpIdx2 = 2;
3869 CommutableOpIdx2 = 3;
3874 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3887#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
3888 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
3889 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
3892#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
3893 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
3894 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
3895 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
3896 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
3897 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
3898 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
3899 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
3902#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
3903 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
3904 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
3907#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
3908 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
3909 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
3910 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
3911 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
3913#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
3914 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
3915 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
3917#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
3918 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
3919 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
3921#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
3922 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
3923 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
3924 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
3926#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3927 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
3928 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
3929 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
3935 unsigned OpIdx2)
const {
3938 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
3942 switch (
MI.getOpcode()) {
3943 case RISCV::TH_MVEQZ:
3944 case RISCV::TH_MVNEZ: {
3945 auto &WorkingMI = cloneIfNew(
MI);
3946 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
3947 : RISCV::TH_MVEQZ));
3951 case RISCV::PseudoCCMOVGPRNoX0:
3952 case RISCV::PseudoCCMOVGPR: {
3956 auto &WorkingMI = cloneIfNew(
MI);
3957 WorkingMI.getOperand(3).setImm(CC);
3981 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
3982 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
3984 switch (
MI.getOpcode()) {
4007 auto &WorkingMI = cloneIfNew(
MI);
4008 WorkingMI.setDesc(
get(
Opc));
4018 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4021 if (OpIdx1 == 3 || OpIdx2 == 3) {
4023 switch (
MI.getOpcode()) {
4034 auto &WorkingMI = cloneIfNew(
MI);
4035 WorkingMI.setDesc(
get(
Opc));
4047#undef CASE_VMA_CHANGE_OPCODE_COMMON
4048#undef CASE_VMA_CHANGE_OPCODE_LMULS
4049#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4050#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4051#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4052#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4053#undef CASE_VFMA_CHANGE_OPCODE_VV
4054#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4056#undef CASE_RVV_OPCODE_UNMASK_LMUL
4057#undef CASE_RVV_OPCODE_MASK_LMUL
4058#undef CASE_RVV_OPCODE_LMUL
4059#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4060#undef CASE_RVV_OPCODE_UNMASK
4061#undef CASE_RVV_OPCODE_MASK_WIDEN
4062#undef CASE_RVV_OPCODE_MASK
4063#undef CASE_RVV_OPCODE_WIDEN
4064#undef CASE_RVV_OPCODE
4066#undef CASE_VMA_OPCODE_COMMON
4067#undef CASE_VMA_OPCODE_LMULS
4068#undef CASE_VFMA_OPCODE_COMMON
4069#undef CASE_VFMA_OPCODE_LMULS_M1
4070#undef CASE_VFMA_OPCODE_LMULS_MF2
4071#undef CASE_VFMA_OPCODE_LMULS_MF4
4072#undef CASE_VFMA_OPCODE_VV
4073#undef CASE_VFMA_SPLATS
4076 switch (
MI.getOpcode()) {
4084 if (
MI.getOperand(1).getReg() == RISCV::X0)
4085 commuteInstruction(
MI);
4087 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4088 MI.getOperand(2).ChangeToImmediate(0);
4089 MI.setDesc(
get(RISCV::ADDI));
4093 if (
MI.getOpcode() == RISCV::XOR &&
4094 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4095 MI.getOperand(1).setReg(RISCV::X0);
4096 MI.getOperand(2).ChangeToImmediate(0);
4097 MI.setDesc(
get(RISCV::ADDI));
4104 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4105 MI.setDesc(
get(RISCV::ADDI));
4111 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4112 MI.getOperand(2).ChangeToImmediate(0);
4113 MI.setDesc(
get(RISCV::ADDI));
4119 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4120 MI.getOperand(2).ChangeToImmediate(0);
4121 MI.setDesc(
get(RISCV::ADDIW));
4128 if (
MI.getOperand(1).getReg() == RISCV::X0)
4129 commuteInstruction(
MI);
4131 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4132 MI.getOperand(2).ChangeToImmediate(0);
4133 MI.setDesc(
get(RISCV::ADDIW));
4138 case RISCV::SH1ADD_UW:
4140 case RISCV::SH2ADD_UW:
4142 case RISCV::SH3ADD_UW:
4144 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4145 MI.removeOperand(1);
4147 MI.setDesc(
get(RISCV::ADDI));
4151 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4152 MI.removeOperand(2);
4153 unsigned Opc =
MI.getOpcode();
4154 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4155 Opc == RISCV::SH3ADD_UW) {
4157 MI.setDesc(
get(RISCV::SLLI_UW));
4161 MI.setDesc(
get(RISCV::SLLI));
4175 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4176 MI.getOperand(2).getReg() == RISCV::X0) {
4177 MI.getOperand(1).setReg(RISCV::X0);
4178 MI.getOperand(2).ChangeToImmediate(0);
4179 MI.setDesc(
get(RISCV::ADDI));
4185 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4186 MI.getOperand(2).setImm(0);
4187 MI.setDesc(
get(RISCV::ADDI));
4195 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4196 MI.getOperand(2).ChangeToImmediate(0);
4197 MI.setDesc(
get(RISCV::ADDI));
4201 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4202 MI.getOperand(2).ChangeToImmediate(0);
4203 MI.setDesc(
get(RISCV::ADDI));
4211 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4212 MI.getOperand(2).ChangeToImmediate(0);
4213 MI.setDesc(
get(RISCV::ADDI));
4223 case RISCV::SLLI_UW:
4225 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4226 MI.getOperand(2).setImm(0);
4227 MI.setDesc(
get(RISCV::ADDI));
4235 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4236 MI.getOperand(2).getReg() == RISCV::X0) {
4237 MI.getOperand(2).ChangeToImmediate(0);
4238 MI.setDesc(
get(RISCV::ADDI));
4242 if (
MI.getOpcode() == RISCV::ADD_UW &&
4243 MI.getOperand(1).getReg() == RISCV::X0) {
4244 MI.removeOperand(1);
4246 MI.setDesc(
get(RISCV::ADDI));
4252 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4253 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4254 MI.setDesc(
get(RISCV::ADDI));
4260 case RISCV::ZEXT_H_RV32:
4261 case RISCV::ZEXT_H_RV64:
4264 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4266 MI.setDesc(
get(RISCV::ADDI));
4275 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4276 MI.getOperand(2).ChangeToImmediate(0);
4277 MI.setDesc(
get(RISCV::ADDI));
4284 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4286 MI.removeOperand(0);
4287 MI.insert(
MI.operands_begin() + 1, {MO0});
4292 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4294 MI.removeOperand(0);
4295 MI.insert(
MI.operands_begin() + 1, {MO0});
4296 MI.setDesc(
get(RISCV::BNE));
4301 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4303 MI.removeOperand(0);
4304 MI.insert(
MI.operands_begin() + 1, {MO0});
4305 MI.setDesc(
get(RISCV::BEQ));
4313#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4314 RISCV::PseudoV##OP##_##LMUL##_TIED
4316#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4317 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4318 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4319 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4320 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4321 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4322 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4324#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4325 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4326 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4329#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4330 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4331 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4332 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4333 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4334 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4335 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4338#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4339 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4341#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4342 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4343 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4344 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4345 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4346 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4347 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4348 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4349 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4350 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4352#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4353 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4354 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4357#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4358 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4359 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4360 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4361 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4362 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4363 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4364 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4365 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4366 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4373 switch (
MI.getOpcode()) {
4379 MI.getNumExplicitOperands() == 7 &&
4380 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4387 switch (
MI.getOpcode()) {
4397 .
add(
MI.getOperand(0))
4399 .
add(
MI.getOperand(1))
4400 .
add(
MI.getOperand(2))
4401 .
add(
MI.getOperand(3))
4402 .
add(
MI.getOperand(4))
4403 .
add(
MI.getOperand(5))
4404 .
add(
MI.getOperand(6));
4413 MI.getNumExplicitOperands() == 6);
4420 switch (
MI.getOpcode()) {
4432 .
add(
MI.getOperand(0))
4434 .
add(
MI.getOperand(1))
4435 .
add(
MI.getOperand(2))
4436 .
add(
MI.getOperand(3))
4437 .
add(
MI.getOperand(4))
4438 .
add(
MI.getOperand(5));
4445 unsigned NumOps =
MI.getNumOperands();
4446 for (
unsigned I = 1;
I < NumOps; ++
I) {
4448 if (
Op.isReg() &&
Op.isKill())
4456 if (
MI.getOperand(0).isEarlyClobber()) {
4462 if (S->
end ==
Idx.getRegSlot(
true))
4463 S->
end =
Idx.getRegSlot();
4470#undef CASE_WIDEOP_OPCODE_COMMON
4471#undef CASE_WIDEOP_OPCODE_LMULS
4472#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4473#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4474#undef CASE_FP_WIDEOP_OPCODE_COMMON
4475#undef CASE_FP_WIDEOP_OPCODE_LMULS
4476#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4477#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    if (ShiftAmount == 0)
  } else if (STI.hasStdExtZba() &&
    if (Amount % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(Amount / 9);
    } else if (Amount % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(Amount / 5);
    } else if (Amount % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(Amount / 3);
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
            .addImm(ShiftAmount - PrevShiftAmount)
        if (Amount >> (ShiftAmount + 1)) {
          Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      PrevShiftAmount = ShiftAmount;
    assert(Acc && "Expected valid accumulator");
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =

  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
std::optional<std::pair<unsigned, unsigned>>
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
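// NOTE (editorial): each segment spill/reload pseudo decodes to the pair
// (NF, LMUL) -- e.g. PseudoVSPILL3_M2 covers a 3-field tuple of LMUL=2
// register groups, i.e. six vector registers in total.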
4650 int16_t MI1FrmOpIdx =
4651 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
4652 int16_t MI2FrmOpIdx =
4653 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
4654 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
4661std::optional<unsigned>
4665 return std::nullopt;
4668 case RISCV::VSLL_VX:
4669 case RISCV::VSRL_VX:
4670 case RISCV::VSRA_VX:
4672 case RISCV::VSSRL_VX:
4673 case RISCV::VSSRA_VX:
4675 case RISCV::VROL_VX:
4676 case RISCV::VROR_VX:
4681 case RISCV::VNSRL_WX:
4682 case RISCV::VNSRA_WX:
4684 case RISCV::VNCLIPU_WX:
4685 case RISCV::VNCLIP_WX:
4687 case RISCV::VWSLL_VX:
4692 case RISCV::VADD_VX:
4693 case RISCV::VSUB_VX:
4694 case RISCV::VRSUB_VX:
4696 case RISCV::VWADDU_VX:
4697 case RISCV::VWSUBU_VX:
4698 case RISCV::VWADD_VX:
4699 case RISCV::VWSUB_VX:
4700 case RISCV::VWADDU_WX:
4701 case RISCV::VWSUBU_WX:
4702 case RISCV::VWADD_WX:
4703 case RISCV::VWSUB_WX:
4705 case RISCV::VADC_VXM:
4706 case RISCV::VADC_VIM:
4707 case RISCV::VMADC_VXM:
4708 case RISCV::VMADC_VIM:
4709 case RISCV::VMADC_VX:
4710 case RISCV::VSBC_VXM:
4711 case RISCV::VMSBC_VXM:
4712 case RISCV::VMSBC_VX:
4714 case RISCV::VAND_VX:
4716 case RISCV::VXOR_VX:
4718 case RISCV::VMSEQ_VX:
4719 case RISCV::VMSNE_VX:
4720 case RISCV::VMSLTU_VX:
4721 case RISCV::VMSLT_VX:
4722 case RISCV::VMSLEU_VX:
4723 case RISCV::VMSLE_VX:
4724 case RISCV::VMSGTU_VX:
4725 case RISCV::VMSGT_VX:
4727 case RISCV::VMINU_VX:
4728 case RISCV::VMIN_VX:
4729 case RISCV::VMAXU_VX:
4730 case RISCV::VMAX_VX:
4732 case RISCV::VMUL_VX:
4733 case RISCV::VMULH_VX:
4734 case RISCV::VMULHU_VX:
4735 case RISCV::VMULHSU_VX:
4737 case RISCV::VDIVU_VX:
4738 case RISCV::VDIV_VX:
4739 case RISCV::VREMU_VX:
4740 case RISCV::VREM_VX:
4742 case RISCV::VWMUL_VX:
4743 case RISCV::VWMULU_VX:
4744 case RISCV::VWMULSU_VX:
4746 case RISCV::VMACC_VX:
4747 case RISCV::VNMSAC_VX:
4748 case RISCV::VMADD_VX:
4749 case RISCV::VNMSUB_VX:
4751 case RISCV::VWMACCU_VX:
4752 case RISCV::VWMACC_VX:
4753 case RISCV::VWMACCSU_VX:
4754 case RISCV::VWMACCUS_VX:
4756 case RISCV::VMERGE_VXM:
4758 case RISCV::VMV_V_X:
4760 case RISCV::VSADDU_VX:
4761 case RISCV::VSADD_VX:
4762 case RISCV::VSSUBU_VX:
4763 case RISCV::VSSUB_VX:
4765 case RISCV::VAADDU_VX:
4766 case RISCV::VAADD_VX:
4767 case RISCV::VASUBU_VX:
4768 case RISCV::VASUB_VX:
4770 case RISCV::VSMUL_VX:
4772 case RISCV::VMV_S_X:
4774 case RISCV::VANDN_VX:
4775 return 1U << Log2SEW;
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  return RVV->BaseInstr; // (null check elided in this listing)

  // getDestLog2EEW: destination EEW = SEW * 2^(DestEEW - 1).
  unsigned Scaled = Log2SEW + (DestEEW - 1);
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // ... (VLMAX-sentinel special cases elided in this listing) ...
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  if (!LHS.isImm() || !RHS.isImm())
    return false;
  return LHS.getImm() <= RHS.getImm();
}
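// --- Added illustration (not part of the original file) ----------------------
// Worked example of the VL comparison above, for the common cases of plain
// immediates or registers (the VLMAX sentinel handling is elided here):
//   * same virtual register on both sides -> true  (trivially LHS <= RHS)
//   * LHS immediate 0                      -> true  (VL of 0 is <= any VL)
//   * LHS imm 4, RHS imm 8                 -> true
//   * LHS imm 8, RHS imm 4                 -> false
//   * two different registers              -> false (unknown, be conservative)
// ------------------------------------------------------------------------------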
  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // ... (body elided in this listing) ...
  }

  std::optional<bool> createTripCountGreaterCondition(
      // ... (parameters and body elided in this listing) ...

  void adjustTripCount(int TripCountAdjust) override {}
};

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  // ... (branch analysis of LoopBB elided in this listing) ...
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Resolve each compared operand to its defining instruction when it is a
  // virtual register:
  //   if (!Reg.isVirtual()) ...
  //   return MRI.getVRegDef(Reg);
  // ... (remaining operand analysis elided in this listing) ...
  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // ... (scalar integer divide/remainder and FDIV_H/S/D cases elided) ...
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer divide/remainder.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector floating-point divide and square root.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
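// --- Added note (not part of the original file) -------------------------------
// The cases above are the scalar and vector divide, remainder, and square-root
// (including the reciprocal square-root estimate) instructions. Reporting them
// as high-latency definitions lets generic optimizations that query this hook
// treat their results as expensive to recompute or to wait on.
// ------------------------------------------------------------------------------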