41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#define GET_INSTRINFO_NAMED_OPS
46#include "RISCVGenInstrInfo.inc"
48#define DEBUG_TYPE "riscv-instr-info"
50 "Number of registers within vector register groups spilled");
52 "Number of registers within vector register groups reloaded");
56 cl::desc(
"Prefer whole register move for vector registers."));
59 "riscv-force-machine-combiner-strategy",
cl::Hidden,
60 cl::desc(
"Force machine combiner to use a specific strategy for machine "
61 "trace metrics evaluation."),
66 "MinInstrCount strategy.")));
72#define GET_RISCVVPseudosTable_IMPL
73#include "RISCVGenSearchableTables.inc"
79#define GET_RISCVMaskedPseudosTable_IMPL
80#include "RISCVGenSearchableTables.inc"
86 RISCV::ADJCALLSTACKUP),
89#define GET_INSTRINFO_HELPERS
90#include "RISCVGenInstrInfo.inc"
93 if (
STI.hasStdExtZca())
102 int &FrameIndex)
const {
112 case RISCV::VL1RE8_V:
113 case RISCV::VL1RE16_V:
114 case RISCV::VL1RE32_V:
115 case RISCV::VL1RE64_V:
118 case RISCV::VL2RE8_V:
119 case RISCV::VL2RE16_V:
120 case RISCV::VL2RE32_V:
121 case RISCV::VL2RE64_V:
124 case RISCV::VL4RE8_V:
125 case RISCV::VL4RE16_V:
126 case RISCV::VL4RE32_V:
127 case RISCV::VL4RE64_V:
130 case RISCV::VL8RE8_V:
131 case RISCV::VL8RE16_V:
132 case RISCV::VL8RE32_V:
133 case RISCV::VL8RE64_V:
141 switch (
MI.getOpcode()) {
165 case RISCV::VL1RE8_V:
166 case RISCV::VL2RE8_V:
167 case RISCV::VL4RE8_V:
168 case RISCV::VL8RE8_V:
169 if (!
MI.getOperand(1).isFI())
171 FrameIndex =
MI.getOperand(1).getIndex();
174 return MI.getOperand(0).getReg();
177 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
178 MI.getOperand(2).getImm() == 0) {
179 FrameIndex =
MI.getOperand(1).getIndex();
180 return MI.getOperand(0).getReg();
187 int &FrameIndex)
const {
195 switch (
MI.getOpcode()) {
220 if (!
MI.getOperand(1).isFI())
222 FrameIndex =
MI.getOperand(1).getIndex();
225 return MI.getOperand(0).getReg();
228 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
229 MI.getOperand(2).getImm() == 0) {
230 FrameIndex =
MI.getOperand(1).getIndex();
231 return MI.getOperand(0).getReg();
241 case RISCV::VFMV_V_F:
244 case RISCV::VFMV_S_F:
246 return MI.getOperand(1).isUndef();
254 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
265 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
266 "Unexpected COPY instruction.");
270 bool FoundDef =
false;
271 bool FirstVSetVLI =
false;
272 unsigned FirstSEW = 0;
275 if (
MBBI->isMetaInstruction())
278 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
288 unsigned FirstVType =
MBBI->getOperand(2).getImm();
293 if (FirstLMul != LMul)
298 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
304 unsigned VType =
MBBI->getOperand(2).getImm();
322 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
324 }
else if (
MBBI->getNumDefs()) {
327 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
333 if (!MO.isReg() || !MO.isDef())
335 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
350 if (MO.getReg() != SrcReg)
391 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
392 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
394 assert(!Fractional &&
"It is impossible be fractional lmul here.");
395 unsigned NumRegs = NF * LMulVal;
401 SrcEncoding += NumRegs - 1;
402 DstEncoding += NumRegs - 1;
408 unsigned,
unsigned> {
416 uint16_t Diff = DstEncoding - SrcEncoding;
417 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
418 DstEncoding % 8 == 7)
420 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
421 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
422 DstEncoding % 4 == 3)
424 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
425 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
426 DstEncoding % 2 == 1)
428 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
431 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
436 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
438 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
439 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
441 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
442 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
444 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
447 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
450 while (
I != NumRegs) {
455 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
456 GetCopyInfo(SrcEncoding, DstEncoding);
460 if (LMul == LMulCopied &&
463 if (DefMBBI->getOpcode() == VIOpc)
470 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
472 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
480 MIB = MIB.add(DefMBBI->getOperand(2));
488 MIB.addImm(Log2SEW ? Log2SEW : 3);
500 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
501 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
510 bool RenamableDest,
bool RenamableSrc)
const {
514 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
521 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
527 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
533 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
534 if (
STI.isRV32() &&
STI.hasStdExtZdinx()) {
543 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
544 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
546 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
548 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
552 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
553 .
addReg(EvenReg, KillFlag)
556 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
563 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
564 RISCV::GPRRegClass.
contains(DstReg)) {
566 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
571 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
573 if (
STI.hasStdExtZfh()) {
574 Opc = RISCV::FSGNJ_H;
577 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
578 "Unexpected extensions");
580 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
581 &RISCV::FPR32RegClass);
582 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
583 &RISCV::FPR32RegClass);
584 Opc = RISCV::FSGNJ_S;
588 .
addReg(SrcReg, KillFlag);
592 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
595 .
addReg(SrcReg, KillFlag);
599 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
602 .
addReg(SrcReg, KillFlag);
606 if (RISCV::FPR32RegClass.
contains(DstReg) &&
607 RISCV::GPRRegClass.
contains(SrcReg)) {
609 .
addReg(SrcReg, KillFlag);
613 if (RISCV::GPRRegClass.
contains(DstReg) &&
614 RISCV::FPR32RegClass.
contains(SrcReg)) {
616 .
addReg(SrcReg, KillFlag);
620 if (RISCV::FPR64RegClass.
contains(DstReg) &&
621 RISCV::GPRRegClass.
contains(SrcReg)) {
622 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
624 .
addReg(SrcReg, KillFlag);
628 if (RISCV::GPRRegClass.
contains(DstReg) &&
629 RISCV::FPR64RegClass.
contains(SrcReg)) {
630 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
632 .
addReg(SrcReg, KillFlag);
638 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
649 Register SrcReg,
bool IsKill,
int FI,
657 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
658 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
660 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
661 Opcode = RISCV::SH_INX;
662 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
663 Opcode = RISCV::SW_INX;
664 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
665 Opcode = RISCV::PseudoRV32ZdinxSD;
666 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
668 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
670 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
672 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
673 Opcode = RISCV::VS1R_V;
674 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
675 Opcode = RISCV::VS2R_V;
676 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
677 Opcode = RISCV::VS4R_V;
678 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
679 Opcode = RISCV::VS8R_V;
680 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
681 Opcode = RISCV::PseudoVSPILL2_M1;
682 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
683 Opcode = RISCV::PseudoVSPILL2_M2;
684 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
685 Opcode = RISCV::PseudoVSPILL2_M4;
686 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
687 Opcode = RISCV::PseudoVSPILL3_M1;
688 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
689 Opcode = RISCV::PseudoVSPILL3_M2;
690 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
691 Opcode = RISCV::PseudoVSPILL4_M1;
692 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
693 Opcode = RISCV::PseudoVSPILL4_M2;
694 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
695 Opcode = RISCV::PseudoVSPILL5_M1;
696 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
697 Opcode = RISCV::PseudoVSPILL6_M1;
698 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
699 Opcode = RISCV::PseudoVSPILL7_M1;
700 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
701 Opcode = RISCV::PseudoVSPILL8_M1;
743 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
744 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
746 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
747 Opcode = RISCV::LH_INX;
748 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
749 Opcode = RISCV::LW_INX;
750 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
751 Opcode = RISCV::PseudoRV32ZdinxLD;
752 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
754 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
756 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
758 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
759 Opcode = RISCV::VL1RE8_V;
760 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
761 Opcode = RISCV::VL2RE8_V;
762 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
763 Opcode = RISCV::VL4RE8_V;
764 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
765 Opcode = RISCV::VL8RE8_V;
766 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
767 Opcode = RISCV::PseudoVRELOAD2_M1;
768 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
769 Opcode = RISCV::PseudoVRELOAD2_M2;
770 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
771 Opcode = RISCV::PseudoVRELOAD2_M4;
772 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
773 Opcode = RISCV::PseudoVRELOAD3_M1;
774 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
775 Opcode = RISCV::PseudoVRELOAD3_M2;
776 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
777 Opcode = RISCV::PseudoVRELOAD4_M1;
778 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
779 Opcode = RISCV::PseudoVRELOAD4_M2;
780 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
781 Opcode = RISCV::PseudoVRELOAD5_M1;
782 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
783 Opcode = RISCV::PseudoVRELOAD6_M1;
784 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
785 Opcode = RISCV::PseudoVRELOAD7_M1;
786 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
787 Opcode = RISCV::PseudoVRELOAD8_M1;
825 if (
Ops.size() != 1 ||
Ops[0] != 1)
828 switch (
MI.getOpcode()) {
830 if (RISCVInstrInfo::isSEXT_W(
MI))
832 if (RISCVInstrInfo::isZEXT_W(
MI))
834 if (RISCVInstrInfo::isZEXT_B(
MI))
841 case RISCV::ZEXT_H_RV32:
842 case RISCV::ZEXT_H_RV64:
849 case RISCV::VMV_X_S: {
852 if (ST.getXLen() < (1U << Log2SEW))
867 case RISCV::VFMV_F_S: {
894 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
904 bool DstIsDead)
const {
920 bool SrcRenamable =
false;
924 bool LastItem = ++Num == Seq.
size();
929 switch (Inst.getOpndKind()) {
939 .
addReg(SrcReg, SrcRegState)
946 .
addReg(SrcReg, SrcRegState)
947 .
addReg(SrcReg, SrcRegState)
953 .
addReg(SrcReg, SrcRegState)
961 SrcRenamable = DstRenamable;
971 case RISCV::CV_BEQIMM:
973 case RISCV::QC_E_BEQI:
975 case RISCV::NDS_BEQC:
980 case RISCV::QC_E_BNEI:
981 case RISCV::CV_BNEIMM:
983 case RISCV::NDS_BNEC:
987 case RISCV::QC_E_BLTI:
991 case RISCV::QC_E_BGEI:
994 case RISCV::QC_BLTUI:
995 case RISCV::QC_E_BLTUI:
998 case RISCV::QC_BGEUI:
999 case RISCV::QC_E_BGEUI:
1031 "Unknown conditional branch");
1042 case RISCV::QC_MVEQ:
1043 return RISCV::QC_MVNE;
1044 case RISCV::QC_MVNE:
1045 return RISCV::QC_MVEQ;
1046 case RISCV::QC_MVLT:
1047 return RISCV::QC_MVGE;
1048 case RISCV::QC_MVGE:
1049 return RISCV::QC_MVLT;
1050 case RISCV::QC_MVLTU:
1051 return RISCV::QC_MVGEU;
1052 case RISCV::QC_MVGEU:
1053 return RISCV::QC_MVLTU;
1054 case RISCV::QC_MVEQI:
1055 return RISCV::QC_MVNEI;
1056 case RISCV::QC_MVNEI:
1057 return RISCV::QC_MVEQI;
1058 case RISCV::QC_MVLTI:
1059 return RISCV::QC_MVGEI;
1060 case RISCV::QC_MVGEI:
1061 return RISCV::QC_MVLTI;
1062 case RISCV::QC_MVLTUI:
1063 return RISCV::QC_MVGEUI;
1064 case RISCV::QC_MVGEUI:
1065 return RISCV::QC_MVLTUI;
1070 switch (SelectOpc) {
1089 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1099 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1104 return RISCV::CV_BEQIMM;
1106 return RISCV::CV_BNEIMM;
1109 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1114 return RISCV::QC_BEQI;
1116 return RISCV::QC_BNEI;
1118 return RISCV::QC_BLTI;
1120 return RISCV::QC_BGEI;
1123 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1128 return RISCV::QC_BLTUI;
1130 return RISCV::QC_BGEUI;
1133 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1138 return RISCV::QC_E_BEQI;
1140 return RISCV::QC_E_BNEI;
1142 return RISCV::QC_E_BLTI;
1144 return RISCV::QC_E_BGEI;
1147 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1152 return RISCV::QC_E_BLTUI;
1154 return RISCV::QC_E_BGEUI;
1157 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1162 return RISCV::NDS_BBC;
1164 return RISCV::NDS_BBS;
1167 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1172 return RISCV::NDS_BEQC;
1174 return RISCV::NDS_BNEC;
1203 bool AllowModify)
const {
1204 TBB = FBB =
nullptr;
1209 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1215 int NumTerminators = 0;
1216 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1219 if (J->getDesc().isUnconditionalBranch() ||
1220 J->getDesc().isIndirectBranch()) {
1227 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1228 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1229 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1232 I = FirstUncondOrIndirectBr;
1236 if (
I->getDesc().isIndirectBranch())
1240 if (
I->isPreISelOpcode())
1244 if (NumTerminators > 2)
1248 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1254 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1260 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1261 I->getDesc().isUnconditionalBranch()) {
1272 int *BytesRemoved)
const {
1279 if (!
I->getDesc().isUnconditionalBranch() &&
1280 !
I->getDesc().isConditionalBranch())
1286 I->eraseFromParent();
1290 if (
I ==
MBB.begin())
1293 if (!
I->getDesc().isConditionalBranch())
1299 I->eraseFromParent();
1312 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1314 "RISC-V branch conditions have two components!");
1348 assert(RS &&
"RegScavenger required for long branching");
1350 "new block should be inserted for expanding unconditional branch");
1353 "restore block should be inserted for restoring clobbered registers");
1362 "Branch offsets outside of the signed 32-bit range not supported");
1367 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1368 auto II =
MBB.end();
1374 RS->enterBasicBlockEnd(
MBB);
1376 if (
STI.hasStdExtZicfilp())
1377 RC = &RISCV::GPRX7RegClass;
1379 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1383 RS->setRegUsed(TmpGPR);
1388 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1390 if (
STI.hasStdExtZicfilp())
1394 if (FrameIndex == -1)
1399 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1402 MI.getOperand(1).setMBB(&RestoreBB);
1406 TRI->eliminateFrameIndex(RestoreBB.
back(),
1410 MRI.replaceRegWith(ScratchReg, TmpGPR);
1411 MRI.clearVirtRegs();
1416 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1421 Cond[0].setImm(RISCV::BNE);
1424 Cond[0].setImm(RISCV::BNEI);
1427 Cond[0].setImm(RISCV::BEQ);
1430 Cond[0].setImm(RISCV::BEQI);
1433 Cond[0].setImm(RISCV::BGE);
1436 Cond[0].setImm(RISCV::BLT);
1439 Cond[0].setImm(RISCV::BGEU);
1442 Cond[0].setImm(RISCV::BLTU);
1444 case RISCV::CV_BEQIMM:
1445 Cond[0].setImm(RISCV::CV_BNEIMM);
1447 case RISCV::CV_BNEIMM:
1448 Cond[0].setImm(RISCV::CV_BEQIMM);
1450 case RISCV::QC_BEQI:
1451 Cond[0].setImm(RISCV::QC_BNEI);
1453 case RISCV::QC_BNEI:
1454 Cond[0].setImm(RISCV::QC_BEQI);
1456 case RISCV::QC_BGEI:
1457 Cond[0].setImm(RISCV::QC_BLTI);
1459 case RISCV::QC_BLTI:
1460 Cond[0].setImm(RISCV::QC_BGEI);
1462 case RISCV::QC_BGEUI:
1463 Cond[0].setImm(RISCV::QC_BLTUI);
1465 case RISCV::QC_BLTUI:
1466 Cond[0].setImm(RISCV::QC_BGEUI);
1468 case RISCV::QC_E_BEQI:
1469 Cond[0].setImm(RISCV::QC_E_BNEI);
1471 case RISCV::QC_E_BNEI:
1472 Cond[0].setImm(RISCV::QC_E_BEQI);
1474 case RISCV::QC_E_BGEI:
1475 Cond[0].setImm(RISCV::QC_E_BLTI);
1477 case RISCV::QC_E_BLTI:
1478 Cond[0].setImm(RISCV::QC_E_BGEI);
1480 case RISCV::QC_E_BGEUI:
1481 Cond[0].setImm(RISCV::QC_E_BLTUI);
1483 case RISCV::QC_E_BLTUI:
1484 Cond[0].setImm(RISCV::QC_E_BGEUI);
1486 case RISCV::NDS_BBC:
1487 Cond[0].setImm(RISCV::NDS_BBS);
1489 case RISCV::NDS_BBS:
1490 Cond[0].setImm(RISCV::NDS_BBC);
1492 case RISCV::NDS_BEQC:
1493 Cond[0].setImm(RISCV::NDS_BNEC);
1495 case RISCV::NDS_BNEC:
1496 Cond[0].setImm(RISCV::NDS_BEQC);
1506 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1507 MI->getOperand(1).getReg() == RISCV::X0) {
1508 Imm =
MI->getOperand(2).getImm();
1521 if (Reg == RISCV::X0) {
1525 return Reg.isVirtual() &&
isLoadImm(
MRI.getVRegDef(Reg), Imm);
1529 bool IsSigned =
false;
1530 bool IsEquality =
false;
1531 switch (
MI.getOpcode()) {
1567 MI.eraseFromParent();
1593 auto searchConst = [&](int64_t C1) ->
Register {
1595 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1598 I.getOperand(0).getReg().isVirtual();
1601 return DefC1->getOperand(0).getReg();
1614 MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1616 if (
Register RegZ = searchConst(C0 + 1)) {
1623 MRI.clearKillFlags(RegZ);
1624 MI.eraseFromParent();
1635 MRI.hasOneUse(RHS.getReg())) {
1637 if (
Register RegZ = searchConst(C0 - 1)) {
1644 MRI.clearKillFlags(RegZ);
1645 MI.eraseFromParent();
1655 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1657 int NumOp =
MI.getNumExplicitOperands();
1658 return MI.getOperand(NumOp - 1).getMBB();
1662 int64_t BrOffset)
const {
1663 unsigned XLen =
STI.getXLen();
1670 case RISCV::NDS_BBC:
1671 case RISCV::NDS_BBS:
1672 case RISCV::NDS_BEQC:
1673 case RISCV::NDS_BNEC:
1683 case RISCV::CV_BEQIMM:
1684 case RISCV::CV_BNEIMM:
1685 case RISCV::QC_BEQI:
1686 case RISCV::QC_BNEI:
1687 case RISCV::QC_BGEI:
1688 case RISCV::QC_BLTI:
1689 case RISCV::QC_BLTUI:
1690 case RISCV::QC_BGEUI:
1691 case RISCV::QC_E_BEQI:
1692 case RISCV::QC_E_BNEI:
1693 case RISCV::QC_E_BGEI:
1694 case RISCV::QC_E_BLTI:
1695 case RISCV::QC_E_BLTUI:
1696 case RISCV::QC_E_BGEUI:
1699 case RISCV::PseudoBR:
1701 case RISCV::PseudoJump:
1712 case RISCV::ADD:
return RISCV::PseudoCCADD;
1713 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1714 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1715 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1716 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1717 case RISCV::AND:
return RISCV::PseudoCCAND;
1718 case RISCV::OR:
return RISCV::PseudoCCOR;
1719 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1720 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1721 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1722 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1723 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1724 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1725 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1726 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1727 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1729 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1730 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1731 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1732 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1733 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1734 case RISCV::ORI:
return RISCV::PseudoCCORI;
1735 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1737 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1738 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1739 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1740 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1741 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1743 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1744 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1745 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1746 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1748 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1749 case RISCV::ORN:
return RISCV::PseudoCCORN;
1750 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1752 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1753 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1757 return RISCV::INSTRUCTION_LIST_END;
1766 if (!
Reg.isVirtual())
1768 if (!
MRI.hasOneNonDBGUse(
Reg))
1774 if (!STI.hasShortForwardBranchIMinMax() &&
1775 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1776 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1779 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1786 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1787 MI->getOperand(1).getReg() == RISCV::X0)
1792 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1802 if (MO.getReg().isPhysical() && !
MRI.isConstantPhysReg(MO.getReg()))
1805 bool DontMoveAcrossStores =
true;
1806 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1813 unsigned &TrueOp,
unsigned &FalseOp,
1814 bool &Optimizable)
const {
1815 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1816 "Unknown select instruction");
1826 Cond.push_back(
MI.getOperand(1));
1827 Cond.push_back(
MI.getOperand(2));
1828 Cond.push_back(
MI.getOperand(3));
1830 Optimizable =
STI.hasShortForwardBranchIALU();
1837 bool PreferFalse)
const {
1838 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1839 "Unknown select instruction");
1840 if (!
STI.hasShortForwardBranchIALU())
1846 bool Invert = !
DefMI;
1854 Register DestReg =
MI.getOperand(0).getReg();
1856 if (!
MRI.constrainRegClass(DestReg, PreviousClass))
1860 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1867 NewMI.
add(
MI.getOperand(1));
1868 NewMI.
add(
MI.getOperand(2));
1877 NewMI.
add(FalseReg);
1892 if (
DefMI->getParent() !=
MI.getParent())
1896 DefMI->eraseFromParent();
1901 if (
MI.isMetaInstruction())
1904 unsigned Opcode =
MI.getOpcode();
1906 if (Opcode == TargetOpcode::INLINEASM ||
1907 Opcode == TargetOpcode::INLINEASM_BR) {
1909 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1913 if (!
MI.memoperands_empty()) {
1916 if (
STI.hasStdExtZca()) {
1917 if (isCompressibleInst(
MI,
STI))
1925 if (Opcode == TargetOpcode::BUNDLE)
1926 return getInstBundleLength(
MI);
1928 if (
MI.getParent() &&
MI.getParent()->getParent()) {
1929 if (isCompressibleInst(
MI,
STI))
1934 case RISCV::PseudoMV_FPR16INX:
1935 case RISCV::PseudoMV_FPR32INX:
1937 return STI.hasStdExtZca() ? 2 : 4;
1938 case TargetOpcode::STACKMAP:
1941 case TargetOpcode::PATCHPOINT:
1944 case TargetOpcode::STATEPOINT: {
1948 return std::max(NumBytes, 8U);
1950 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1951 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1952 case TargetOpcode::PATCHABLE_TAIL_CALL: {
1955 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
1956 F.hasFnAttribute(
"patchable-function-entry")) {
1958 if (
F.getFnAttribute(
"patchable-function-entry")
1960 .getAsInteger(10, Num))
1961 return get(Opcode).getSize();
1964 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
1968 return STI.is64Bit() ? 68 : 44;
1971 return get(Opcode).getSize();
1975unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
1979 while (++
I != E &&
I->isInsideBundle()) {
1980 assert(!
I->isBundle() &&
"No nested bundle!");
1987 const unsigned Opcode =
MI.getOpcode();
1991 case RISCV::FSGNJ_D:
1992 case RISCV::FSGNJ_S:
1993 case RISCV::FSGNJ_H:
1994 case RISCV::FSGNJ_D_INX:
1995 case RISCV::FSGNJ_D_IN32X:
1996 case RISCV::FSGNJ_S_INX:
1997 case RISCV::FSGNJ_H_INX:
1999 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2000 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2004 return (
MI.getOperand(1).isReg() &&
2005 MI.getOperand(1).getReg() == RISCV::X0) ||
2006 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2008 return MI.isAsCheapAsAMove();
2011std::optional<DestSourcePair>
2015 switch (
MI.getOpcode()) {
2021 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2022 MI.getOperand(2).isReg())
2024 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2025 MI.getOperand(1).isReg())
2030 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2031 MI.getOperand(2).getImm() == 0)
2035 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2036 MI.getOperand(1).isReg())
2040 case RISCV::SH1ADD_UW:
2042 case RISCV::SH2ADD_UW:
2044 case RISCV::SH3ADD_UW:
2045 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2046 MI.getOperand(2).isReg())
2049 case RISCV::FSGNJ_D:
2050 case RISCV::FSGNJ_S:
2051 case RISCV::FSGNJ_H:
2052 case RISCV::FSGNJ_D_INX:
2053 case RISCV::FSGNJ_D_IN32X:
2054 case RISCV::FSGNJ_S_INX:
2055 case RISCV::FSGNJ_H_INX:
2057 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2058 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2062 return std::nullopt;
2070 const auto &SchedModel =
STI.getSchedModel();
2071 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2083 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2087 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2088 RISCV::OpName::frm) < 0;
2090 "New instructions require FRM whereas the old one does not have it");
2097 for (
auto *NewMI : InsInstrs) {
2099 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2100 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2142bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2143 bool Invert)
const {
2144#define OPCODE_LMUL_CASE(OPC) \
2145 case RISCV::OPC##_M1: \
2146 case RISCV::OPC##_M2: \
2147 case RISCV::OPC##_M4: \
2148 case RISCV::OPC##_M8: \
2149 case RISCV::OPC##_MF2: \
2150 case RISCV::OPC##_MF4: \
2151 case RISCV::OPC##_MF8
2153#define OPCODE_LMUL_MASK_CASE(OPC) \
2154 case RISCV::OPC##_M1_MASK: \
2155 case RISCV::OPC##_M2_MASK: \
2156 case RISCV::OPC##_M4_MASK: \
2157 case RISCV::OPC##_M8_MASK: \
2158 case RISCV::OPC##_MF2_MASK: \
2159 case RISCV::OPC##_MF4_MASK: \
2160 case RISCV::OPC##_MF8_MASK
2165 Opcode = *InvOpcode;
2182#undef OPCODE_LMUL_MASK_CASE
2183#undef OPCODE_LMUL_CASE
2186bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2193 const TargetRegisterInfo *
TRI =
MRI->getTargetRegisterInfo();
2197 const uint64_t TSFlags =
Desc.TSFlags;
2199 auto checkImmOperand = [&](
unsigned OpIdx) {
2203 auto checkRegOperand = [&](
unsigned OpIdx) {
2211 if (!checkRegOperand(1))
2226 bool SeenMI2 =
false;
2227 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2236 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2237 Register SrcReg = It->getOperand(1).getReg();
2255 if (MI1VReg != SrcReg)
2264 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2303bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2304 bool &Commuted)
const {
2308 "Expect the present of passthrough operand.");
2314 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2315 areRVVInstsReassociable(Inst, *MI2);
2319 return areRVVInstsReassociable(Inst, *MI1) &&
2320 (isVectorAssociativeAndCommutative(*MI1) ||
2321 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2328 if (!isVectorAssociativeAndCommutative(Inst) &&
2329 !isVectorAssociativeAndCommutative(Inst,
true))
2341 MI1 =
MRI.getUniqueVRegDef(Op1.
getReg());
2343 MI2 =
MRI.getUniqueVRegDef(Op2.
getReg());
2355 for (
unsigned I = 0;
I < 5; ++
I)
2361 bool &Commuted)
const {
2362 if (isVectorAssociativeAndCommutative(Inst) ||
2363 isVectorAssociativeAndCommutative(Inst,
true))
2364 return hasReassociableVectorSibling(Inst, Commuted);
2370 unsigned OperandIdx = Commuted ? 2 : 1;
2374 int16_t InstFrmOpIdx =
2375 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2376 int16_t SiblingFrmOpIdx =
2377 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2379 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2384 bool Invert)
const {
2385 if (isVectorAssociativeAndCommutative(Inst, Invert))
2393 Opc = *InverseOpcode;
2438std::optional<unsigned>
2440#define RVV_OPC_LMUL_CASE(OPC, INV) \
2441 case RISCV::OPC##_M1: \
2442 return RISCV::INV##_M1; \
2443 case RISCV::OPC##_M2: \
2444 return RISCV::INV##_M2; \
2445 case RISCV::OPC##_M4: \
2446 return RISCV::INV##_M4; \
2447 case RISCV::OPC##_M8: \
2448 return RISCV::INV##_M8; \
2449 case RISCV::OPC##_MF2: \
2450 return RISCV::INV##_MF2; \
2451 case RISCV::OPC##_MF4: \
2452 return RISCV::INV##_MF4; \
2453 case RISCV::OPC##_MF8: \
2454 return RISCV::INV##_MF8
2456#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2457 case RISCV::OPC##_M1_MASK: \
2458 return RISCV::INV##_M1_MASK; \
2459 case RISCV::OPC##_M2_MASK: \
2460 return RISCV::INV##_M2_MASK; \
2461 case RISCV::OPC##_M4_MASK: \
2462 return RISCV::INV##_M4_MASK; \
2463 case RISCV::OPC##_M8_MASK: \
2464 return RISCV::INV##_M8_MASK; \
2465 case RISCV::OPC##_MF2_MASK: \
2466 return RISCV::INV##_MF2_MASK; \
2467 case RISCV::OPC##_MF4_MASK: \
2468 return RISCV::INV##_MF4_MASK; \
2469 case RISCV::OPC##_MF8_MASK: \
2470 return RISCV::INV##_MF8_MASK
2474 return std::nullopt;
2476 return RISCV::FSUB_H;
2478 return RISCV::FSUB_S;
2480 return RISCV::FSUB_D;
2482 return RISCV::FADD_H;
2484 return RISCV::FADD_S;
2486 return RISCV::FADD_D;
2503#undef RVV_OPC_LMUL_MASK_CASE
2504#undef RVV_OPC_LMUL_CASE
2509 bool DoRegPressureReduce) {
2525 if (DoRegPressureReduce && !
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2536 bool DoRegPressureReduce) {
2543 DoRegPressureReduce)) {
2549 DoRegPressureReduce)) {
2559 bool DoRegPressureReduce) {
2567 unsigned CombineOpc) {
2574 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2577 if (!
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2588 unsigned OuterShiftAmt) {
2594 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2621 case RISCV::SH1ADD_UW:
2623 case RISCV::SH2ADD_UW:
2625 case RISCV::SH3ADD_UW:
2671 bool DoRegPressureReduce)
const {
2680 DoRegPressureReduce);
2688 return RISCV::FMADD_H;
2690 return RISCV::FMADD_S;
2692 return RISCV::FMADD_D;
2737 bool Mul1IsKill = Mul1.
isKill();
2738 bool Mul2IsKill = Mul2.
isKill();
2739 bool AddendIsKill = Addend.
isKill();
2748 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2773 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2780 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2783 switch (InnerShiftAmt - OuterShiftAmt) {
2787 InnerOpc = RISCV::ADD;
2790 InnerOpc = RISCV::SH1ADD;
2793 InnerOpc = RISCV::SH2ADD;
2796 InnerOpc = RISCV::SH3ADD;
2804 Register NewVR =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
2814 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2831 DelInstrs, InstrIdxForVirtReg);
2858 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2860 unsigned OpType = Operand.OperandType;
2866 ErrInfo =
"Expected an immediate operand.";
2869 int64_t Imm = MO.
getImm();
2876#define CASE_OPERAND_UIMM(NUM) \
2877 case RISCVOp::OPERAND_UIMM##NUM: \
2878 Ok = isUInt<NUM>(Imm); \
2880#define CASE_OPERAND_SIMM(NUM) \
2881 case RISCVOp::OPERAND_SIMM##NUM: \
2882 Ok = isInt<NUM>(Imm); \
2913 Ok = Imm >= 1 && Imm <= 32;
2955 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
2965 Ok = Imm >= -15 && Imm <= 16;
2993 Ok = Ok && Imm != 0;
2996 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
2999 Ok = Imm >= 0 && Imm <= 10;
3002 Ok = Imm >= 0 && Imm <= 7;
3005 Ok = Imm >= 1 && Imm <= 10;
3008 Ok = Imm >= 2 && Imm <= 14;
3017 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3052 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3056 ErrInfo =
"Invalid immediate";
3065 ErrInfo =
"Expected a non-register operand.";
3069 ErrInfo =
"Invalid immediate";
3078 ErrInfo =
"Expected a non-register operand.";
3082 ErrInfo =
"Invalid immediate";
3090 ErrInfo =
"Expected a non-register operand.";
3094 ErrInfo =
"Invalid immediate";
3100 int64_t Imm = MO.
getImm();
3103 ErrInfo =
"Invalid immediate";
3106 }
else if (!MO.
isReg()) {
3107 ErrInfo =
"Expected a register or immediate operand.";
3117 if (!
Op.isImm() && !
Op.isReg()) {
3118 ErrInfo =
"Invalid operand type for VL operand";
3121 if (
Op.isReg() &&
Op.getReg().isValid()) {
3123 auto *RC =
MRI.getRegClass(
Op.getReg());
3124 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3125 ErrInfo =
"Invalid register class for VL operand";
3130 ErrInfo =
"VL operand w/o SEW operand?";
3136 if (!
MI.getOperand(
OpIdx).isImm()) {
3137 ErrInfo =
"SEW value expected to be an immediate";
3142 ErrInfo =
"Unexpected SEW value";
3145 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3147 ErrInfo =
"Unexpected SEW value";
3153 if (!
MI.getOperand(
OpIdx).isImm()) {
3154 ErrInfo =
"Policy operand expected to be an immediate";
3159 ErrInfo =
"Invalid Policy Value";
3163 ErrInfo =
"policy operand w/o VL operand?";
3171 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3172 ErrInfo =
"policy operand w/o tied operand?";
3179 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3180 ErrInfo =
"dynamic rounding mode should read FRM";
3202 case RISCV::LD_RV32:
3212 case RISCV::SD_RV32:
3228 int64_t NewOffset = OldOffset + Disp;
3250 "Addressing mode not supported for folding");
3324 case RISCV::LD_RV32:
3327 case RISCV::SD_RV32:
3334 OffsetIsScalable =
false;
3350 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3358 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3361 auto Base1 = MO1->getValue();
3362 auto Base2 = MO2->getValue();
3363 if (!Base1 || !Base2)
3371 return Base1 == Base2;
3377 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3378 unsigned NumBytes)
const {
3381 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3386 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3392 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3398 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3448 int64_t OffsetA = 0, OffsetB = 0;
3454 int LowOffset = std::min(OffsetA, OffsetB);
3455 int HighOffset = std::max(OffsetA, OffsetB);
3456 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3458 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3465std::pair<unsigned, unsigned>
3468 return std::make_pair(TF & Mask, TF & ~Mask);
3474 static const std::pair<unsigned, const char *> TargetFlags[] = {
3475 {MO_CALL,
"riscv-call"},
3476 {MO_LO,
"riscv-lo"},
3477 {MO_HI,
"riscv-hi"},
3478 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3479 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3480 {MO_GOT_HI,
"riscv-got-hi"},
3481 {MO_TPREL_LO,
"riscv-tprel-lo"},
3482 {MO_TPREL_HI,
"riscv-tprel-hi"},
3483 {MO_TPREL_ADD,
"riscv-tprel-add"},
3484 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3485 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3486 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3487 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3488 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3489 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3497 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3510 unsigned &Flags)
const {
3529 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3530 F.hasFnAttribute(
"patchable-function-entry");
3535 return MI.readsRegister(RegNo,
TRI) ||
3536 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3541 return MI.modifiesRegister(RegNo,
TRI) ||
3542 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3546 if (!
MBB.back().isReturn())
3569 if (
C.back().isReturn()) {
3571 "The candidate who uses return instruction must be outlined "
3584 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *
TRI);
3587std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3590 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3591 unsigned MinRepeats)
const {
3597 if (RepeatedSequenceLocs.size() < MinRepeats)
3598 return std::nullopt;
3602 unsigned InstrSizeCExt =
3604 unsigned CallOverhead = 0, FrameOverhead = 0;
3607 unsigned CFICount = 0;
3608 for (
auto &
I : Candidate) {
3609 if (
I.isCFIInstruction())
3620 std::vector<MCCFIInstruction> CFIInstructions =
3621 C.getMF()->getFrameInstructions();
3623 if (CFICount > 0 && CFICount != CFIInstructions.size())
3624 return std::nullopt;
3632 CallOverhead = 4 + InstrSizeCExt;
3639 FrameOverhead = InstrSizeCExt;
3645 return std::nullopt;
3647 for (
auto &
C : RepeatedSequenceLocs)
3648 C.setCallInfo(MOCI, CallOverhead);
3650 unsigned SequenceSize = 0;
3651 for (
auto &
MI : Candidate)
3654 return std::make_unique<outliner::OutlinedFunction>(
3655 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3661 unsigned Flags)
const {
3665 MBB->getParent()->getSubtarget().getRegisterInfo();
3666 const auto &
F =
MI.getMF()->getFunction();
3671 if (
MI.isCFIInstruction())
3679 for (
const auto &MO :
MI.operands()) {
3684 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3685 F.hasSection() ||
F.getSectionPrefix()))
3702 MBB.addLiveIn(RISCV::X5);
3717 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3725 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3736 return std::nullopt;
3740 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3741 MI.getOperand(2).isImm())
3742 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3744 return std::nullopt;
3752 std::string GenericComment =
3754 if (!GenericComment.empty())
3755 return GenericComment;
3759 return std::string();
3763 return std::string();
3765 std::string Comment;
3772 switch (OpInfo.OperandType) {
3775 unsigned Imm =
Op.getImm();
3780 unsigned Imm =
Op.getImm();
3785 unsigned Imm =
Op.getImm();
3791 unsigned Log2SEW =
Op.getImm();
3792 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3798 unsigned Policy =
Op.getImm();
3800 "Invalid Policy Value");
3810#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3811 RISCV::Pseudo##OP##_##LMUL
3813#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3814 RISCV::Pseudo##OP##_##LMUL##_MASK
3816#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3817 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3818 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3820#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3821 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3822 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3823 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3824 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3825 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3826 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3828#define CASE_RVV_OPCODE_UNMASK(OP) \
3829 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3830 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3832#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3833 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3834 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3835 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3836 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3837 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3838 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3840#define CASE_RVV_OPCODE_MASK(OP) \
3841 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3842 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3844#define CASE_RVV_OPCODE_WIDEN(OP) \
3845 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3846 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3848#define CASE_RVV_OPCODE(OP) \
3849 CASE_RVV_OPCODE_UNMASK(OP): \
3850 case CASE_RVV_OPCODE_MASK(OP)
3854#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
3855 RISCV::PseudoV##OP##_##TYPE##_##LMUL
3857#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
3858 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
3859 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
3860 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
3861 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
3862 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
3863 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
3864 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
3867#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
3868 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
3870#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
3871 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
3872 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
3873 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
3874 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
3876#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
3877 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
3878 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
3880#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
3881 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
3882 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
3884#define CASE_VFMA_OPCODE_VV(OP) \
3885 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
3886 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
3887 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
3888 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
3890#define CASE_VFMA_SPLATS(OP) \
3891 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
3892 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
3893 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
3894 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3898 unsigned &SrcOpIdx1,
3899 unsigned &SrcOpIdx2)
const {
3901 if (!
Desc.isCommutable())
3904 switch (
MI.getOpcode()) {
3905 case RISCV::TH_MVEQZ:
3906 case RISCV::TH_MVNEZ:
3910 if (
MI.getOperand(2).getReg() == RISCV::X0)
3913 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3914 case RISCV::QC_SELECTIEQ:
3915 case RISCV::QC_SELECTINE:
3916 case RISCV::QC_SELECTIIEQ:
3917 case RISCV::QC_SELECTIINE:
3918 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3919 case RISCV::QC_MVEQ:
3920 case RISCV::QC_MVNE:
3921 case RISCV::QC_MVLT:
3922 case RISCV::QC_MVGE:
3923 case RISCV::QC_MVLTU:
3924 case RISCV::QC_MVGEU:
3925 case RISCV::QC_MVEQI:
3926 case RISCV::QC_MVNEI:
3927 case RISCV::QC_MVLTI:
3928 case RISCV::QC_MVGEI:
3929 case RISCV::QC_MVLTUI:
3930 case RISCV::QC_MVGEUI:
3931 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
3932 case RISCV::TH_MULA:
3933 case RISCV::TH_MULAW:
3934 case RISCV::TH_MULAH:
3935 case RISCV::TH_MULS:
3936 case RISCV::TH_MULSW:
3937 case RISCV::TH_MULSH:
3939 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3940 case RISCV::PseudoCCMOVGPRNoX0:
3941 case RISCV::PseudoCCMOVGPR:
3943 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3970 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3997 unsigned CommutableOpIdx1 = 1;
3998 unsigned CommutableOpIdx2 = 3;
3999 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4020 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4022 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4026 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4027 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4033 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4034 SrcOpIdx2 == CommuteAnyOperandIndex) {
4037 unsigned CommutableOpIdx1 = SrcOpIdx1;
4038 if (SrcOpIdx1 == SrcOpIdx2) {
4041 CommutableOpIdx1 = 1;
4042 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4044 CommutableOpIdx1 = SrcOpIdx2;
4049 unsigned CommutableOpIdx2;
4050 if (CommutableOpIdx1 != 1) {
4052 CommutableOpIdx2 = 1;
4054 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4059 if (Op1Reg !=
MI.getOperand(2).getReg())
4060 CommutableOpIdx2 = 2;
4062 CommutableOpIdx2 = 3;
4067 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4080#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4081 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4082 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4085#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4086 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4087 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4088 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4089 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4090 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4091 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4092 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4095#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4096 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4097 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4100#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4101 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4102 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4103 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4104 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4106#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4107 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4108 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4110#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4111 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4112 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4114#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4115 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4116 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4117 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4118 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4120#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4121 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4122 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4123 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4124 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4130 unsigned OpIdx2)
const {
4133 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4137 switch (
MI.getOpcode()) {
4138 case RISCV::TH_MVEQZ:
4139 case RISCV::TH_MVNEZ: {
4140 auto &WorkingMI = cloneIfNew(
MI);
4141 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4142 : RISCV::TH_MVEQZ));
4146 case RISCV::QC_SELECTIEQ:
4147 case RISCV::QC_SELECTINE:
4148 case RISCV::QC_SELECTIIEQ:
4149 case RISCV::QC_SELECTIINE:
4151 case RISCV::QC_MVEQ:
4152 case RISCV::QC_MVNE:
4153 case RISCV::QC_MVLT:
4154 case RISCV::QC_MVGE:
4155 case RISCV::QC_MVLTU:
4156 case RISCV::QC_MVGEU:
4157 case RISCV::QC_MVEQI:
4158 case RISCV::QC_MVNEI:
4159 case RISCV::QC_MVLTI:
4160 case RISCV::QC_MVGEI:
4161 case RISCV::QC_MVLTUI:
4162 case RISCV::QC_MVGEUI: {
4163 auto &WorkingMI = cloneIfNew(
MI);
4168 case RISCV::PseudoCCMOVGPRNoX0:
4169 case RISCV::PseudoCCMOVGPR: {
4173 auto &WorkingMI = cloneIfNew(
MI);
4174 WorkingMI.getOperand(3).setImm(CC);
4198 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4199 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4201 switch (
MI.getOpcode()) {
4224 auto &WorkingMI = cloneIfNew(
MI);
4225 WorkingMI.setDesc(
get(
Opc));
4235 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4238 if (OpIdx1 == 3 || OpIdx2 == 3) {
4240 switch (
MI.getOpcode()) {
4251 auto &WorkingMI = cloneIfNew(
MI);
4252 WorkingMI.setDesc(
get(
Opc));
4264#undef CASE_VMA_CHANGE_OPCODE_COMMON
4265#undef CASE_VMA_CHANGE_OPCODE_LMULS
4266#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4267#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4268#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4269#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4270#undef CASE_VFMA_CHANGE_OPCODE_VV
4271#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4273#undef CASE_RVV_OPCODE_UNMASK_LMUL
4274#undef CASE_RVV_OPCODE_MASK_LMUL
4275#undef CASE_RVV_OPCODE_LMUL
4276#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4277#undef CASE_RVV_OPCODE_UNMASK
4278#undef CASE_RVV_OPCODE_MASK_WIDEN
4279#undef CASE_RVV_OPCODE_MASK
4280#undef CASE_RVV_OPCODE_WIDEN
4281#undef CASE_RVV_OPCODE
4283#undef CASE_VMA_OPCODE_COMMON
4284#undef CASE_VMA_OPCODE_LMULS
4285#undef CASE_VFMA_OPCODE_COMMON
4286#undef CASE_VFMA_OPCODE_LMULS_M1
4287#undef CASE_VFMA_OPCODE_LMULS_MF2
4288#undef CASE_VFMA_OPCODE_LMULS_MF4
4289#undef CASE_VFMA_OPCODE_VV
4290#undef CASE_VFMA_SPLATS
4293 switch (
MI.getOpcode()) {
4301 if (
MI.getOperand(1).getReg() == RISCV::X0)
4302 commuteInstruction(
MI);
4304 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4305 MI.getOperand(2).ChangeToImmediate(0);
4306 MI.setDesc(
get(RISCV::ADDI));
4310 if (
MI.getOpcode() == RISCV::XOR &&
4311 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4312 MI.getOperand(1).setReg(RISCV::X0);
4313 MI.getOperand(2).ChangeToImmediate(0);
4314 MI.setDesc(
get(RISCV::ADDI));
4321 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4322 MI.setDesc(
get(RISCV::ADDI));
4328 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4329 MI.getOperand(2).ChangeToImmediate(0);
4330 MI.setDesc(
get(RISCV::ADDI));
4336 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4337 MI.getOperand(2).ChangeToImmediate(0);
4338 MI.setDesc(
get(RISCV::ADDIW));
4345 if (
MI.getOperand(1).getReg() == RISCV::X0)
4346 commuteInstruction(
MI);
4348 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4349 MI.getOperand(2).ChangeToImmediate(0);
4350 MI.setDesc(
get(RISCV::ADDIW));
4355 case RISCV::SH1ADD_UW:
4357 case RISCV::SH2ADD_UW:
4359 case RISCV::SH3ADD_UW:
4361 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4362 MI.removeOperand(1);
4364 MI.setDesc(
get(RISCV::ADDI));
4368 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4369 MI.removeOperand(2);
4370 unsigned Opc =
MI.getOpcode();
4371 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4372 Opc == RISCV::SH3ADD_UW) {
4374 MI.setDesc(
get(RISCV::SLLI_UW));
4378 MI.setDesc(
get(RISCV::SLLI));
4392 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4393 MI.getOperand(2).getReg() == RISCV::X0) {
4394 MI.getOperand(1).setReg(RISCV::X0);
4395 MI.getOperand(2).ChangeToImmediate(0);
4396 MI.setDesc(
get(RISCV::ADDI));
4402 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4403 MI.getOperand(2).setImm(0);
4404 MI.setDesc(
get(RISCV::ADDI));
4412 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4413 MI.getOperand(2).ChangeToImmediate(0);
4414 MI.setDesc(
get(RISCV::ADDI));
4418 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4419 MI.getOperand(2).ChangeToImmediate(0);
4420 MI.setDesc(
get(RISCV::ADDI));
4428 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4429 MI.getOperand(2).ChangeToImmediate(0);
4430 MI.setDesc(
get(RISCV::ADDI));
4440 case RISCV::SLLI_UW:
4442 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4443 MI.getOperand(2).setImm(0);
4444 MI.setDesc(
get(RISCV::ADDI));
4452 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4453 MI.getOperand(2).getReg() == RISCV::X0) {
4454 MI.getOperand(2).ChangeToImmediate(0);
4455 MI.setDesc(
get(RISCV::ADDI));
4459 if (
MI.getOpcode() == RISCV::ADD_UW &&
4460 MI.getOperand(1).getReg() == RISCV::X0) {
4461 MI.removeOperand(1);
4463 MI.setDesc(
get(RISCV::ADDI));
4469 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4470 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4471 MI.setDesc(
get(RISCV::ADDI));
4477 case RISCV::ZEXT_H_RV32:
4478 case RISCV::ZEXT_H_RV64:
4481 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4483 MI.setDesc(
get(RISCV::ADDI));
4492 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4493 MI.getOperand(2).ChangeToImmediate(0);
4494 MI.setDesc(
get(RISCV::ADDI));
4501 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4503 MI.removeOperand(0);
4504 MI.insert(
MI.operands_begin() + 1, {MO0});
4509 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4511 MI.removeOperand(0);
4512 MI.insert(
MI.operands_begin() + 1, {MO0});
4513 MI.setDesc(
get(RISCV::BNE));
4518 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4520 MI.removeOperand(0);
4521 MI.insert(
MI.operands_begin() + 1, {MO0});
4522 MI.setDesc(
get(RISCV::BEQ));
4530#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4531 RISCV::PseudoV##OP##_##LMUL##_TIED
4533#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4534 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4535 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4536 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4537 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4538 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4539 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4541#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4542 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4543 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4546#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4547 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4548 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4549 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4550 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4551 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4552 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4555#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4556 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4558#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4559 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4560 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4561 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4562 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4563 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4564 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4565 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4566 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4567 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4569#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4570 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4571 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4574#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4575 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4576 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4577 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4578 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4579 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4580 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4581 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4582 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4583 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4585#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4586 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4587 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4588 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4589 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4590 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4592#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4593 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4594 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4595 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4596 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4597 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4604 switch (
MI.getOpcode()) {
4612 MI.getNumExplicitOperands() == 7 &&
4613 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4620 switch (
MI.getOpcode()) {
4632 .
add(
MI.getOperand(0))
4634 .
add(
MI.getOperand(1))
4635 .
add(
MI.getOperand(2))
4636 .
add(
MI.getOperand(3))
4637 .
add(
MI.getOperand(4))
4638 .
add(
MI.getOperand(5))
4639 .
add(
MI.getOperand(6));
4648 MI.getNumExplicitOperands() == 6);
4655 switch (
MI.getOpcode()) {
4667 .
add(
MI.getOperand(0))
4669 .
add(
MI.getOperand(1))
4670 .
add(
MI.getOperand(2))
4671 .
add(
MI.getOperand(3))
4672 .
add(
MI.getOperand(4))
4673 .
add(
MI.getOperand(5));
4680 unsigned NumOps =
MI.getNumOperands();
4683 if (
Op.isReg() &&
Op.isKill())
4691 if (
MI.getOperand(0).isEarlyClobber()) {
4705#undef CASE_WIDEOP_OPCODE_COMMON
4706#undef CASE_WIDEOP_OPCODE_LMULS
4707#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4708#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4709#undef CASE_FP_WIDEOP_OPCODE_COMMON
4710#undef CASE_FP_WIDEOP_OPCODE_LMULS
4711#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4712#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator II, const DebugLoc &DL,
                            Register DestReg, uint32_t Amount,
                            MachineInstr::MIFlag Flag) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    // Power of two: a single shift (or nothing for Amount == 1).
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
        .addReg(DestReg, RegState::Kill).addImm(ShiftAmount).setMIFlag(Flag);
  } else if (int ShXAmount, ShiftAmount;
             STI.hasStdExtZba() &&
             (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
    // Amount == (2^ShiftAmount) * {3, 5, 9}: use a Zba SHXADD plus a shift.
    unsigned Opc;
    switch (ShXAmount) {
    default:
      llvm_unreachable("Unexpected result of isShifted359");
    case 1: Opc = RISCV::SH1ADD; break;
    case 2: Opc = RISCV::SH2ADD; break;
    case 3: Opc = RISCV::SH3ADD; break;
    }
    if (ShiftAmount)
      BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
          .addReg(DestReg, RegState::Kill).addImm(ShiftAmount).setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(Opc), DestReg)
        .addReg(DestReg, RegState::Kill).addReg(DestReg).setMIFlag(Flag);
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    // Amount == 2^N + 1: shift into a scratch register and add.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg).addImm(Log2_32(Amount - 1)).setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill).setMIFlag(Flag);
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    // Amount == 2^N - 1: shift into a scratch register and subtract.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg).addImm(Log2_32(Amount + 1)).setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill).setMIFlag(Flag);
  } else if (STI.hasStdExtZmmul()) {
    // Materialize the constant and use a real multiply.
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    movImm(MBB, II, DL, N, Amount, Flag);
    BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(N, RegState::Kill).setMIFlag(Flag);
  } else {
    // No multiply available: shift-and-add over the set bits of Amount.
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
              .addReg(DestReg, RegState::Kill)
              .addImm(ShiftAmount - PrevShiftAmount).setMIFlag(Flag);
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            BuildMI(MBB, II, DL, get(TargetOpcode::COPY), Acc)
                .addReg(DestReg).setMIFlag(Flag);
          } else {
            BuildMI(MBB, II, DL, get(RISCV::ADD), Acc)
                .addReg(Acc, RegState::Kill).addReg(DestReg).setMIFlag(Flag);
          }
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(Acc, RegState::Kill).setMIFlag(Flag);
  }
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}

unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
  return OptLevel >= CodeGenOptLevel::Aggressive
             ? STI.getTailDupAggressiveThreshold()
             : 2;
}
bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !RISCV::isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
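// The pair returned above is (NF, LMUL): e.g. PseudoVSPILL3_M2 spills a
// 3-field segment tuple of LMUL=2 register groups, touching NF * LMUL = 6
// vector registers in total.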
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  const MachineOperand &FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  const MachineOperand &FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
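// hasEqualFRM() matters when combining or reassociating FP instructions: two
// instructions may only be merged if they carry the same rounding-mode
// operand, since the result would otherwise be rounded differently.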
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // Single-width shifts, scaling shifts, and rotates: only the low log2(SEW)
  // bits of the scalar shift amount are used.
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    return Log2SEW;

  // Narrowing shifts, narrowing clips, and widening shifts: the shift amount
  // is taken modulo 2*SEW, so the low log2(2*SEW) bits are used.
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  case RISCV::VWSLL_VX:
    return Log2SEW + 1;

  // For the remaining vector-scalar arithmetic, the scalar operand is
  // truncated to SEW bits.
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  case RISCV::VMERGE_VXM:
  case RISCV::VMV_V_X:
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  case RISCV::VSMUL_VX:
  case RISCV::VMV_S_X:
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
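// Example: for VSLL_VX with SEW=64 (Log2SEW = 6), only the low 6 bits of the
// scalar shift amount are demanded, so the producer of that scalar can be
// narrowed or simplified without changing the shift's result.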
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}
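// Example: getRVVMCOpcode(RISCV::PseudoVADD_VV_M1) returns RISCV::VADD_VV,
// the MC-level opcode shared by all LMUL/SEW variants of the pseudo.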
unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // DestEEW == 0 encodes EEW = 1.
  if (DestEEW == 0)
    return 0;
  // EEW = SEW * 2^(DestEEW - 1).
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(RISCVVType::isValidSEW(1 << Scaled) && "Unexpected EEW");
  return Scaled;
}

// Fold a VL operand to a comparable immediate, treating the VLMAX sentinel as
// the largest possible value; returns std::nullopt for non-immediates.
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (!MO.isImm())
    return std::nullopt;
  int64_t Imm = MO.getImm();
  if (Imm == RISCV::VLMaxSentinel)
    return std::numeric_limits<int64_t>::max();
  return Imm;
}
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
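// Examples: an immediate VL of 0 is trivially <= any other VL, and two uses
// of the same virtual register compare as equal even though the actual VL
// value is unknown at compile time.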
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *LHS;
  MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(MachineInstr *LHS, MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Make the instructions for loop control be placed in stage 0.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is normalized for such use.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // Must be a conditional branch.
  if (FBB == nullptr)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Record the defs of the compared registers so they can be kept in stage 0;
  // give up if either is a PHI.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };
  MachineInstr *LHS = FindRegDef(Cond[1]);
  MachineInstr *RHS = FindRegDef(Cond[2]);
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
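// The MachinePipeliner pass drives this hook: the returned PipelinerLoopInfo
// keeps the loop-control compare in stage 0 and supplies the normalized
// branch condition used to guard each prologue/epilogue copy of the loop.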
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Scalar integer divide/remainder.
  case RISCV::DIV:
  case RISCV::DIVU:
  case RISCV::DIVW:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMU:
  case RISCV::REMW:
  case RISCV::REMUW:
  // Scalar FP divide/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer divide/remainder.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector FP divide/sqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
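// Generic codegen heuristics query isHighLatencyDef() so that the divide and
// sqrt opcodes listed above are treated as expensive to recompute or to chain
// dependent work behind; the exact consumers are target-independent passes.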