73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
90 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
91 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
92 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
93 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
94 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
95 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
96 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
97 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
100 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
101 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
102 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
103 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
104 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
105 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
106 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
107 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
113 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
126 if (usePreRAHazardRecognizer()) {
128 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
148 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
185 bool AllowModify)
const {
200 bool CantAnalyze =
false;
204 while (
I->isDebugInstr() || !
I->isTerminator() ||
206 I->getOpcode() == ARM::t2DoLoopStartTP){
218 TBB =
I->getOperand(0).getMBB();
224 assert(!FBB &&
"FBB should have been null.");
226 TBB =
I->getOperand(0).getMBB();
227 Cond.push_back(
I->getOperand(1));
228 Cond.push_back(
I->getOperand(2));
229 }
else if (
I->isReturn()) {
232 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
239 TBB =
I->getOperand(1).getMBB();
241 Cond.push_back(
I->getOperand(0));
298 int *BytesRemoved)
const {
299 assert(!BytesRemoved &&
"code size not handled");
310 I->eraseFromParent();
320 I->eraseFromParent();
329 int *BytesAdded)
const {
330 assert(!BytesAdded &&
"code size not handled");
339 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
341 "ARM branch conditions have two or three components!");
351 }
else if (
Cond.size() == 2) {
362 if (
Cond.size() == 2)
367 else if (
Cond.size() == 3)
378 if (
Cond.size() == 2) {
390 while (++
I != E &&
I->isInsideBundle()) {
391 int PIdx =
I->findFirstPredOperandIdx();
392 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
398 int PIdx =
MI.findFirstPredOperandIdx();
399 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
407 std::string GenericComment =
409 if (!GenericComment.empty())
410 return GenericComment;
414 return std::string();
418 int FirstPredOp =
MI.findFirstPredOperandIdx();
419 if (FirstPredOp != (
int)
OpIdx)
420 return std::string();
422 std::string CC =
"CC::";
429 unsigned Opc =
MI.getOpcode();
438 int PIdx =
MI.findFirstPredOperandIdx();
442 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
449 "CPSR def isn't expected operand");
450 assert((
MI.getOperand(1).isDead() ||
451 MI.getOperand(1).getReg() != ARM::CPSR) &&
452 "if conversion tried to stop defining used CPSR");
453 MI.getOperand(1).setReg(ARM::NoRegister);
463 if (Pred1.
size() > 2 || Pred2.
size() > 2)
488 std::vector<MachineOperand> &Pred,
489 bool SkipDead)
const {
492 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
493 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
494 if (ClobbersCPSR || IsCPSR) {
512 for (
const auto &MO :
MI.operands())
513 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
519 switch (
MI->getOpcode()) {
520 default:
return true;
551 if (!
MI.isPredicable())
589 if (!MO.isReg() || MO.isUndef() || MO.isUse())
591 if (MO.getReg() != ARM::CPSR)
611 switch (
MI.getOpcode()) {
619 case TargetOpcode::BUNDLE:
620 return getInstBundleLength(
MI);
621 case ARM::CONSTPOOL_ENTRY:
622 case ARM::JUMPTABLE_INSTS:
623 case ARM::JUMPTABLE_ADDRS:
624 case ARM::JUMPTABLE_TBB:
625 case ARM::JUMPTABLE_TBH:
628 return MI.getOperand(2).getImm();
630 return MI.getOperand(1).getImm();
632 case ARM::INLINEASM_BR: {
634 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
642unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
646 while (++
I != E &&
I->isInsideBundle()) {
647 assert(!
I->isBundle() &&
"No nested bundle!");
657 unsigned Opc = Subtarget.isThumb()
658 ? (Subtarget.
isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
677 unsigned Opc = Subtarget.isThumb()
678 ? (Subtarget.
isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
712 unsigned Cond,
unsigned Inactive) {
722 bool RenamableSrc)
const {
723 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
724 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
726 if (GPRDest && GPRSrc) {
734 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
735 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
738 if (SPRDest && SPRSrc)
740 else if (GPRDest && SPRSrc)
742 else if (SPRDest && GPRSrc)
744 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
746 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
747 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
752 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR)
754 if (
Opc == ARM::MVE_VORR)
756 else if (
Opc != ARM::MQPRCopy)
762 unsigned BeginIdx = 0;
763 unsigned SubRegs = 0;
767 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
768 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
769 BeginIdx = ARM::qsub_0;
771 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
772 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
773 BeginIdx = ARM::qsub_0;
776 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
778 BeginIdx = ARM::dsub_0;
780 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
782 BeginIdx = ARM::dsub_0;
784 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
786 BeginIdx = ARM::dsub_0;
788 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
789 Opc = Subtarget.
isThumb2() ? ARM::tMOVr : ARM::MOVr;
790 BeginIdx = ARM::gsub_0;
792 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
794 BeginIdx = ARM::dsub_0;
797 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
799 BeginIdx = ARM::dsub_0;
802 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
804 BeginIdx = ARM::dsub_0;
807 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
808 !Subtarget.hasFP64()) {
810 BeginIdx = ARM::ssub_0;
812 }
else if (SrcReg == ARM::CPSR) {
815 }
else if (DestReg == ARM::CPSR) {
818 }
else if (DestReg == ARM::VPR) {
824 }
else if (SrcReg == ARM::VPR) {
830 }
else if (DestReg == ARM::FPSCR_NZCV) {
832 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
836 }
else if (SrcReg == ARM::FPSCR_NZCV) {
838 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
844 assert(
Opc &&
"Impossible reg-to-reg copy");
850 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
851 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
857 for (
unsigned i = 0; i != SubRegs; ++i) {
858 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
859 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
860 assert(Dst && Src &&
"Bad sub-register");
862 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
867 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR) {
871 if (
Opc == ARM::MVE_VORR)
876 if (
Opc == ARM::MOVr)
885std::optional<DestSourcePair>
894 if (!
MI.isMoveReg() ||
895 (
MI.getOpcode() == ARM::VORRq &&
896 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
901std::optional<ParamLoadedValue>
905 Register DstReg = DstSrcPair->Destination->getReg();
933 unsigned SubIdx,
unsigned State,
936 return MIB.
addReg(Reg, State);
939 return MIB.
addReg(
TRI->getSubReg(Reg, SubIdx), State);
940 return MIB.
addReg(Reg, State, SubIdx);
945 Register SrcReg,
bool isKill,
int FI,
958 switch (
TRI->getSpillSize(*RC)) {
960 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
971 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
978 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
985 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
992 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1003 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1010 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1011 if (Subtarget.hasV5TEOps()) {
1031 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1047 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1048 Subtarget.hasMVEIntegerOps()) {
1053 .addMemOperand(MMO);
1059 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1062 Subtarget.hasNEON()) {
1076 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1083 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1084 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1085 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1087 Subtarget.hasNEON()) {
1096 }
else if (Subtarget.hasMVEIntegerOps()) {
1108 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1109 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1116 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1117 Subtarget.hasMVEIntegerOps()) {
1122 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1128 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1129 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1130 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, 0,
TRI);
1131 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, 0,
TRI);
1132 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, 0,
TRI);
1133 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, 0,
TRI);
1144 int &FrameIndex)
const {
1145 switch (
MI.getOpcode()) {
1149 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1150 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1151 MI.getOperand(3).getImm() == 0) {
1152 FrameIndex =
MI.getOperand(1).getIndex();
1153 return MI.getOperand(0).getReg();
1162 case ARM::VSTR_P0_off:
1163 case ARM::VSTR_FPSCR_NZCVQC_off:
1164 case ARM::MVE_VSTRWU32:
1165 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1166 MI.getOperand(2).getImm() == 0) {
1167 FrameIndex =
MI.getOperand(1).getIndex();
1168 return MI.getOperand(0).getReg();
1172 case ARM::VST1d64TPseudo:
1173 case ARM::VST1d64QPseudo:
1174 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1175 FrameIndex =
MI.getOperand(0).getIndex();
1176 return MI.getOperand(2).getReg();
1180 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1181 FrameIndex =
MI.getOperand(1).getIndex();
1182 return MI.getOperand(0).getReg();
1185 case ARM::MQQPRStore:
1186 case ARM::MQQQQPRStore:
1187 if (
MI.getOperand(1).isFI()) {
1188 FrameIndex =
MI.getOperand(1).getIndex();
1189 return MI.getOperand(0).getReg();
1198 int &FrameIndex)
const {
1200 if (
MI.mayStore() && hasStoreToStackSlot(
MI,
Accesses) &&
1203 cast<FixedStackPseudoSourceValue>(
Accesses.front()->getPseudoValue())
1223 switch (
TRI->getSpillSize(*RC)) {
1225 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1235 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1241 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1247 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1253 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1263 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1269 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1272 if (Subtarget.hasV5TEOps()) {
1295 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1308 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1309 Subtarget.hasMVEIntegerOps()) {
1311 MIB.addFrameIndex(FI)
1313 .addMemOperand(MMO);
1319 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1321 Subtarget.hasNEON()) {
1342 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1343 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1344 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1346 Subtarget.hasNEON()) {
1352 }
else if (Subtarget.hasMVEIntegerOps()) {
1372 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1373 Subtarget.hasMVEIntegerOps()) {
1377 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1401 int &FrameIndex)
const {
1402 switch (
MI.getOpcode()) {
1406 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1407 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1408 MI.getOperand(3).getImm() == 0) {
1409 FrameIndex =
MI.getOperand(1).getIndex();
1410 return MI.getOperand(0).getReg();
1419 case ARM::VLDR_P0_off:
1420 case ARM::VLDR_FPSCR_NZCVQC_off:
1421 case ARM::MVE_VLDRWU32:
1422 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1423 MI.getOperand(2).getImm() == 0) {
1424 FrameIndex =
MI.getOperand(1).getIndex();
1425 return MI.getOperand(0).getReg();
1429 case ARM::VLD1d8TPseudo:
1430 case ARM::VLD1d16TPseudo:
1431 case ARM::VLD1d32TPseudo:
1432 case ARM::VLD1d64TPseudo:
1433 case ARM::VLD1d8QPseudo:
1434 case ARM::VLD1d16QPseudo:
1435 case ARM::VLD1d32QPseudo:
1436 case ARM::VLD1d64QPseudo:
1437 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1438 FrameIndex =
MI.getOperand(1).getIndex();
1439 return MI.getOperand(0).getReg();
1443 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1444 FrameIndex =
MI.getOperand(1).getIndex();
1445 return MI.getOperand(0).getReg();
1448 case ARM::MQQPRLoad:
1449 case ARM::MQQQQPRLoad:
1450 if (
MI.getOperand(1).isFI()) {
1451 FrameIndex =
MI.getOperand(1).getIndex();
1452 return MI.getOperand(0).getReg();
1461 int &FrameIndex)
const {
1463 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI,
Accesses) &&
1466 cast<FixedStackPseudoSourceValue>(
Accesses.front()->getPseudoValue())
1477 bool isThumb2 = Subtarget.
isThumb2();
1484 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1486 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1487 : isThumb1 ? ARM::tLDMIA_UPD
1491 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1494 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1496 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1497 : isThumb1 ? ARM::tSTMIA_UPD
1501 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1516 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1517 return TRI.getEncodingValue(Reg1) <
1518 TRI.getEncodingValue(Reg2);
1521 for (
const auto &Reg : ScratchRegs) {
1530 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1531 expandLoadStackGuard(
MI);
1532 MI.getParent()->erase(
MI);
1536 if (
MI.getOpcode() == ARM::MEMCPY) {
1545 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1550 Register DstRegS =
MI.getOperand(0).getReg();
1551 Register SrcRegS =
MI.getOperand(1).getReg();
1552 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1557 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1559 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1560 if (!DstRegD || !SrcRegD)
1566 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1570 if (
MI.getOperand(0).isDead())
1579 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1580 if (ImpDefIdx != -1)
1581 MI.removeOperand(ImpDefIdx);
1584 MI.setDesc(
get(ARM::VMOVD));
1585 MI.getOperand(0).setReg(DstRegD);
1586 MI.getOperand(1).setReg(SrcRegD);
1593 MI.getOperand(1).setIsUndef();
1598 if (
MI.getOperand(1).isKill()) {
1599 MI.getOperand(1).setIsKill(
false);
1600 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1614 assert(MCPE.isMachineConstantPoolEntry() &&
1615 "Expecting a machine constantpool entry!");
1629 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
ARMCP::CPValue,
1634 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1637 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1645 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1665 case ARM::tLDRpci_pic:
1666 case ARM::t2LDRpci_pic: {
1686 switch (
I->getOpcode()) {
1687 case ARM::tLDRpci_pic:
1688 case ARM::t2LDRpci_pic: {
1690 unsigned CPI =
I->getOperand(1).getIndex();
1692 I->getOperand(1).setIndex(CPI);
1693 I->getOperand(2).setImm(PCLabelId);
1697 if (!
I->isBundledWithSucc())
1708 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1709 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1710 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1711 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1712 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1713 Opcode == ARM::t2MOV_ga_pcrel) {
1724 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1725 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1726 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1727 Opcode == ARM::t2MOV_ga_pcrel)
1739 if (isARMCP0 && isARMCP1) {
1745 }
else if (!isARMCP0 && !isARMCP1) {
1749 }
else if (Opcode == ARM::PICLDR) {
1757 if (Addr0 != Addr1) {
1793 int64_t &Offset2)
const {
1800 auto IsLoadOpcode = [&](
unsigned Opcode) {
1815 case ARM::t2LDRSHi8:
1817 case ARM::t2LDRBi12:
1818 case ARM::t2LDRSHi12:
1837 if (isa<ConstantSDNode>(Load1->
getOperand(1)) &&
1839 Offset1 = cast<ConstantSDNode>(Load1->
getOperand(1))->getSExtValue();
1840 Offset2 = cast<ConstantSDNode>(Load2->
getOperand(1))->getSExtValue();
1859 int64_t Offset1, int64_t Offset2,
1860 unsigned NumLoads)
const {
1864 assert(Offset2 > Offset1);
1866 if ((Offset2 - Offset1) / 8 > 64)
1897 if (
MI.isDebugInstr())
1901 if (
MI.isTerminator() ||
MI.isPosition())
1905 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1919 while (++
I !=
MBB->
end() &&
I->isDebugInstr())
1921 if (
I !=
MBB->
end() &&
I->getOpcode() == ARM::t2IT)
1932 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
1940 unsigned NumCycles,
unsigned ExtraPredCycles,
1950 if (!Pred->
empty()) {
1952 if (LastMI->
getOpcode() == ARM::t2Bcc) {
1961 MBB, 0, 0, Probability);
1966 unsigned TCycles,
unsigned TExtra,
1968 unsigned FCycles,
unsigned FExtra,
1985 const unsigned ScalingUpFactor = 1024;
1987 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1988 unsigned UnpredCost;
1989 if (!Subtarget.hasBranchPredictor()) {
1992 unsigned NotTakenBranchCost = 1;
1994 unsigned TUnpredCycles, FUnpredCycles;
1997 TUnpredCycles = TCycles + NotTakenBranchCost;
1998 FUnpredCycles = TakenBranchCost;
2001 TUnpredCycles = TCycles + TakenBranchCost;
2002 FUnpredCycles = FCycles + NotTakenBranchCost;
2005 PredCost -= 1 * ScalingUpFactor;
2008 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2009 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2010 UnpredCost = TUnpredCost + FUnpredCost;
2013 if (Subtarget.
isThumb2() && TCycles + FCycles > 4) {
2014 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2017 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2018 unsigned FUnpredCost =
2020 UnpredCost = TUnpredCost + FUnpredCost;
2021 UnpredCost += 1 * ScalingUpFactor;
2025 return PredCost <= UnpredCost;
2030 unsigned NumInsts)
const {
2038 unsigned MaxInsts = Subtarget.
restrictIT() ? 1 : 4;
2047 if (
MI.getOpcode() == ARM::t2Bcc &&
2070 return Subtarget.isProfitableToUnpredicate();
2078 int PIdx =
MI.findFirstPredOperandIdx();
2084 PredReg =
MI.getOperand(PIdx+1).getReg();
2093 if (
Opc == ARM::t2B)
2102 unsigned OpIdx2)
const {
2103 switch (
MI.getOpcode()) {
2105 case ARM::t2MOVCCr: {
2110 if (CC ==
ARMCC::AL || PredReg != ARM::CPSR)
2130 if (!Reg.isVirtual())
2132 if (!
MRI.hasOneNonDBGUse(Reg))
2144 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2151 if (MO.getReg().isPhysical())
2153 if (MO.isDef() && !MO.isDead())
2156 bool DontMoveAcrossStores =
true;
2157 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2164 unsigned &TrueOp,
unsigned &FalseOp,
2165 bool &Optimizable)
const {
2166 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2167 "Unknown select instruction");
2176 Cond.push_back(
MI.getOperand(3));
2177 Cond.push_back(
MI.getOperand(4));
2186 bool PreferFalse)
const {
2187 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2188 "Unknown select instruction");
2191 bool Invert = !
DefMI;
2193 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2200 Register DestReg =
MI.getOperand(0).getReg();
2203 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2205 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2216 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2219 unsigned CondCode =
MI.getOperand(3).getImm();
2224 NewMI.
add(
MI.getOperand(4));
2235 NewMI.
add(FalseReg);
2266 {ARM::ADDSri, ARM::ADDri},
2267 {ARM::ADDSrr, ARM::ADDrr},
2268 {ARM::ADDSrsi, ARM::ADDrsi},
2269 {ARM::ADDSrsr, ARM::ADDrsr},
2271 {ARM::SUBSri, ARM::SUBri},
2272 {ARM::SUBSrr, ARM::SUBrr},
2273 {ARM::SUBSrsi, ARM::SUBrsi},
2274 {ARM::SUBSrsr, ARM::SUBrsr},
2276 {ARM::RSBSri, ARM::RSBri},
2277 {ARM::RSBSrsi, ARM::RSBrsi},
2278 {ARM::RSBSrsr, ARM::RSBrsr},
2280 {ARM::tADDSi3, ARM::tADDi3},
2281 {ARM::tADDSi8, ARM::tADDi8},
2282 {ARM::tADDSrr, ARM::tADDrr},
2283 {ARM::tADCS, ARM::tADC},
2285 {ARM::tSUBSi3, ARM::tSUBi3},
2286 {ARM::tSUBSi8, ARM::tSUBi8},
2287 {ARM::tSUBSrr, ARM::tSUBrr},
2288 {ARM::tSBCS, ARM::tSBC},
2289 {ARM::tRSBS, ARM::tRSB},
2290 {ARM::tLSLSri, ARM::tLSLri},
2292 {ARM::t2ADDSri, ARM::t2ADDri},
2293 {ARM::t2ADDSrr, ARM::t2ADDrr},
2294 {ARM::t2ADDSrs, ARM::t2ADDrs},
2296 {ARM::t2SUBSri, ARM::t2SUBri},
2297 {ARM::t2SUBSrr, ARM::t2SUBrr},
2298 {ARM::t2SUBSrs, ARM::t2SUBrs},
2300 {ARM::t2RSBSri, ARM::t2RSBri},
2301 {ARM::t2RSBSrs, ARM::t2RSBrs},
2306 if (OldOpc == Entry.PseudoOpc)
2307 return Entry.MachineOpc;
2318 if (NumBytes == 0 && DestReg != BaseReg) {
2327 bool isSub = NumBytes < 0;
2328 if (isSub) NumBytes = -NumBytes;
2332 unsigned ThisVal = NumBytes & llvm::rotr<uint32_t>(0xFF, RotAmt);
2333 assert(ThisVal &&
"Didn't extract field correctly");
2336 NumBytes &= ~ThisVal;
2341 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2354 unsigned NumBytes) {
2365 if (!IsPush && !IsPop)
2368 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2369 MI->getOpcode() == ARM::VLDMDIA_UPD;
2370 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2371 MI->getOpcode() == ARM::tPOP ||
2372 MI->getOpcode() == ARM::tPOP_RET;
2374 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2375 MI->getOperand(1).getReg() == ARM::SP)) &&
2376 "trying to fold sp update into non-sp-updating push/pop");
2381 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2386 int RegListIdx = IsT1PushPop ? 2 : 4;
2389 unsigned RegsNeeded;
2392 RegsNeeded = NumBytes / 8;
2393 RegClass = &ARM::DPRRegClass;
2395 RegsNeeded = NumBytes / 4;
2396 RegClass = &ARM::GPRRegClass;
2406 unsigned FirstRegEnc = -1;
2409 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2414 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2415 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2418 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2421 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2424 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2431 false,
false,
true));
2441 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2463 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2464 MI->removeOperand(i);
2477 unsigned Opcode =
MI.getOpcode();
2483 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2486 if (Opcode == ARM::ADDri) {
2487 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2490 MI.setDesc(
TII.get(ARM::MOVr));
2491 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2492 MI.removeOperand(FrameRegIdx+1);
2498 MI.setDesc(
TII.get(ARM::SUBri));
2504 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2505 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2513 unsigned ThisImmVal =
Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);
2520 "Bit extraction didn't work?");
2521 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2523 unsigned ImmIdx = 0;
2525 unsigned NumBits = 0;
2529 ImmIdx = FrameRegIdx + 1;
2530 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2534 ImmIdx = FrameRegIdx+2;
2541 ImmIdx = FrameRegIdx+2;
2552 ImmIdx = FrameRegIdx+1;
2560 ImmIdx = FrameRegIdx+1;
2570 ImmIdx = FrameRegIdx+1;
2571 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2580 Offset += InstrOffs * Scale;
2581 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2591 int ImmedOffset =
Offset / Scale;
2592 unsigned Mask = (1 << NumBits) - 1;
2593 if ((
unsigned)
Offset <= Mask * Scale) {
2595 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2601 ImmedOffset = -ImmedOffset;
2603 ImmedOffset |= 1 << NumBits;
2611 ImmedOffset = ImmedOffset & Mask;
2614 ImmedOffset = -ImmedOffset;
2616 ImmedOffset |= 1 << NumBits;
2632 Register &SrcReg2, int64_t &CmpMask,
2633 int64_t &CmpValue)
const {
2634 switch (
MI.getOpcode()) {
2639 SrcReg =
MI.getOperand(0).getReg();
2642 CmpValue =
MI.getOperand(1).getImm();
2647 SrcReg =
MI.getOperand(0).getReg();
2648 SrcReg2 =
MI.getOperand(1).getReg();
2654 SrcReg =
MI.getOperand(0).getReg();
2656 CmpMask =
MI.getOperand(1).getImm();
2669 int CmpMask,
bool CommonUse) {
2670 switch (
MI->getOpcode()) {
2673 if (CmpMask !=
MI->getOperand(2).getImm())
2675 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2765 switch (
MI->getOpcode()) {
2766 default:
return false;
2862 if (!
MI)
return false;
2865 if (CmpMask != ~0) {
2869 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
2871 if (UI->getParent() != CmpInstr.
getParent())
2880 if (!
MI)
return false;
2889 if (
I ==
B)
return false;
2900 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
2905 if (CmpInstr.
getOpcode() == ARM::CMPri ||
2913 bool IsThumb1 =
false;
2930 if (
MI && IsThumb1) {
2932 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
2933 bool CanReorder =
true;
2934 for (;
I != E; --
I) {
2935 if (
I->getOpcode() != ARM::tMOVi8) {
2941 MI =
MI->removeFromParent();
2952 bool SubAddIsThumb1 =
false;
2967 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
2968 Instr.readsRegister(ARM::CPSR,
TRI))
2990 IsThumb1 = SubAddIsThumb1;
3005 bool isSafe =
false;
3008 while (!isSafe && ++
I != E) {
3010 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3011 !isSafe && IO != EO; ++IO) {
3025 bool IsInstrVSel =
true;
3026 switch (Instr.getOpcode()) {
3028 IsInstrVSel =
false;
3062 bool IsSub =
Opc == ARM::SUBrr ||
Opc == ARM::t2SUBrr ||
3063 Opc == ARM::SUBri ||
Opc == ARM::t2SUBri ||
3064 Opc == ARM::tSUBrr ||
Opc == ARM::tSUBi3 ||
3066 unsigned OpI =
Opc != ARM::tSUBrr ? 1 : 2;
3078 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3112 if (Succ->isLiveIn(ARM::CPSR))
3119 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3120 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3121 MI->getOperand(CPSRRegNum).setIsDef(
true);
3129 for (
auto &[MO,
Cond] : OperandsToUpdate)
3132 MI->clearRegisterDeads(ARM::CPSR);
3146 int64_t CmpMask, CmpValue;
3148 if (Next !=
MI.getParent()->end() &&
3159 unsigned DefOpc =
DefMI.getOpcode();
3160 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3161 DefOpc != ARM::tMOVi32imm)
3163 if (!
DefMI.getOperand(1).isImm())
3167 if (!
MRI->hasOneNonDBGUse(Reg))
3183 if (
UseMI.getOperand(NumOps - 1).
getReg() == ARM::CPSR)
3189 unsigned UseOpc =
UseMI.getOpcode();
3190 unsigned NewUseOpc = 0;
3192 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3193 bool Commute =
false;
3195 default:
return false;
3203 case ARM::t2EORrr: {
3209 if (UseOpc == ARM::SUBrr && Commute)
3215 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3218 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3232 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3233 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3237 case ARM::t2SUBrr: {
3238 if (UseOpc == ARM::t2SUBrr && Commute)
3243 const bool ToSP =
DefMI.getOperand(0).
getReg() == ARM::SP;
3244 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3245 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3247 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3250 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3265 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3266 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3273 unsigned OpIdx = Commute ? 2 : 1;
3275 bool isKill =
UseMI.getOperand(
OpIdx).isKill();
3277 Register NewReg =
MRI->createVirtualRegister(TRC);
3285 UseMI.getOperand(1).setReg(NewReg);
3286 UseMI.getOperand(1).setIsKill();
3287 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3288 DefMI.eraseFromParent();
3295 case ARM::t2ADDspImm:
3296 case ARM::t2SUBspImm:
3306 switch (
MI.getOpcode()) {
3310 assert(UOps >= 0 &&
"bad # UOps");
3318 unsigned ShOpVal =
MI.getOperand(3).getImm();
3323 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3331 if (!
MI.getOperand(2).getReg())
3334 unsigned ShOpVal =
MI.getOperand(3).getImm();
3339 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3349 case ARM::LDRSB_POST:
3350 case ARM::LDRSH_POST: {
3353 return (Rt == Rm) ? 4 : 3;
3356 case ARM::LDR_PRE_REG:
3357 case ARM::LDRB_PRE_REG: {
3362 unsigned ShOpVal =
MI.getOperand(4).getImm();
3367 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3373 case ARM::STR_PRE_REG:
3374 case ARM::STRB_PRE_REG: {
3375 unsigned ShOpVal =
MI.getOperand(4).getImm();
3380 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3387 case ARM::STRH_PRE: {
3397 case ARM::LDR_POST_REG:
3398 case ARM::LDRB_POST_REG:
3399 case ARM::LDRH_POST: {
3402 return (Rt == Rm) ? 3 : 2;
3405 case ARM::LDR_PRE_IMM:
3406 case ARM::LDRB_PRE_IMM:
3407 case ARM::LDR_POST_IMM:
3408 case ARM::LDRB_POST_IMM:
3409 case ARM::STRB_POST_IMM:
3410 case ARM::STRB_POST_REG:
3411 case ARM::STRB_PRE_IMM:
3412 case ARM::STRH_POST:
3413 case ARM::STR_POST_IMM:
3414 case ARM::STR_POST_REG:
3415 case ARM::STR_PRE_IMM:
3418 case ARM::LDRSB_PRE:
3419 case ARM::LDRSH_PRE: {
3426 unsigned ShOpVal =
MI.getOperand(4).getImm();
3431 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3444 return (Rt == Rn) ? 3 : 2;
3455 case ARM::LDRD_POST:
3456 case ARM::t2LDRD_POST:
3459 case ARM::STRD_POST:
3460 case ARM::t2STRD_POST:
3463 case ARM::LDRD_PRE: {
3470 return (Rt == Rn) ? 4 : 3;
3473 case ARM::t2LDRD_PRE: {
3476 return (Rt == Rn) ? 4 : 3;
3479 case ARM::STRD_PRE: {
3487 case ARM::t2STRD_PRE:
3490 case ARM::t2LDR_POST:
3491 case ARM::t2LDRB_POST:
3492 case ARM::t2LDRB_PRE:
3493 case ARM::t2LDRSBi12:
3494 case ARM::t2LDRSBi8:
3495 case ARM::t2LDRSBpci:
3497 case ARM::t2LDRH_POST:
3498 case ARM::t2LDRH_PRE:
3500 case ARM::t2LDRSB_POST:
3501 case ARM::t2LDRSB_PRE:
3502 case ARM::t2LDRSH_POST:
3503 case ARM::t2LDRSH_PRE:
3504 case ARM::t2LDRSHi12:
3505 case ARM::t2LDRSHi8:
3506 case ARM::t2LDRSHpci:
3510 case ARM::t2LDRDi8: {
3513 return (Rt == Rn) ? 3 : 2;
3516 case ARM::t2STRB_POST:
3517 case ARM::t2STRB_PRE:
3520 case ARM::t2STRH_POST:
3521 case ARM::t2STRH_PRE:
3523 case ARM::t2STR_POST:
3524 case ARM::t2STR_PRE:
3555 E =
MI.memoperands_end();
3557 Size += (*I)->getSize().getValue();
3564 return std::min(
Size / 4, 16U);
3569 unsigned UOps = 1 + NumRegs;
3573 case ARM::VLDMDIA_UPD:
3574 case ARM::VLDMDDB_UPD:
3575 case ARM::VLDMSIA_UPD:
3576 case ARM::VLDMSDB_UPD:
3577 case ARM::VSTMDIA_UPD:
3578 case ARM::VSTMDDB_UPD:
3579 case ARM::VSTMSIA_UPD:
3580 case ARM::VSTMSDB_UPD:
3581 case ARM::LDMIA_UPD:
3582 case ARM::LDMDA_UPD:
3583 case ARM::LDMDB_UPD:
3584 case ARM::LDMIB_UPD:
3585 case ARM::STMIA_UPD:
3586 case ARM::STMDA_UPD:
3587 case ARM::STMDB_UPD:
3588 case ARM::STMIB_UPD:
3589 case ARM::tLDMIA_UPD:
3590 case ARM::tSTMIA_UPD:
3591 case ARM::t2LDMIA_UPD:
3592 case ARM::t2LDMDB_UPD:
3593 case ARM::t2STMIA_UPD:
3594 case ARM::t2STMDB_UPD:
3597 case ARM::LDMIA_RET:
3599 case ARM::t2LDMIA_RET:
3608 if (!ItinData || ItinData->
isEmpty())
3612 unsigned Class =
Desc.getSchedClass();
3614 if (ItinUOps >= 0) {
3621 unsigned Opc =
MI.getOpcode();
3640 case ARM::VLDMDIA_UPD:
3641 case ARM::VLDMDDB_UPD:
3643 case ARM::VLDMSIA_UPD:
3644 case ARM::VLDMSDB_UPD:
3646 case ARM::VSTMDIA_UPD:
3647 case ARM::VSTMDDB_UPD:
3649 case ARM::VSTMSIA_UPD:
3650 case ARM::VSTMSDB_UPD: {
3651 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3652 return (NumRegs / 2) + (NumRegs % 2) + 1;
3655 case ARM::LDMIA_RET:
3660 case ARM::LDMIA_UPD:
3661 case ARM::LDMDA_UPD:
3662 case ARM::LDMDB_UPD:
3663 case ARM::LDMIB_UPD:
3668 case ARM::STMIA_UPD:
3669 case ARM::STMDA_UPD:
3670 case ARM::STMDB_UPD:
3671 case ARM::STMIB_UPD:
3673 case ARM::tLDMIA_UPD:
3674 case ARM::tSTMIA_UPD:
3678 case ARM::t2LDMIA_RET:
3681 case ARM::t2LDMIA_UPD:
3682 case ARM::t2LDMDB_UPD:
3685 case ARM::t2STMIA_UPD:
3686 case ARM::t2STMDB_UPD: {
3687 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3699 unsigned UOps = (NumRegs / 2);
3705 unsigned UOps = (NumRegs / 2);
3708 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3719std::optional<unsigned>
3722 unsigned DefIdx,
unsigned DefAlign)
const {
3731 DefCycle = RegNo / 2 + 1;
3736 bool isSLoad =
false;
3741 case ARM::VLDMSIA_UPD:
3742 case ARM::VLDMSDB_UPD:
3749 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3753 DefCycle = RegNo + 2;
3759std::optional<unsigned>
3762 unsigned DefIdx,
unsigned DefAlign)
const {
3772 DefCycle = RegNo / 2;
3778 DefCycle = (RegNo / 2);
3781 if ((RegNo % 2) || DefAlign < 8)
3787 DefCycle = RegNo + 2;
3793std::optional<unsigned>
3796 unsigned UseIdx,
unsigned UseAlign)
const {
3804 UseCycle = RegNo / 2 + 1;
3809 bool isSStore =
false;
3814 case ARM::VSTMSIA_UPD:
3815 case ARM::VSTMSDB_UPD:
3822 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3826 UseCycle = RegNo + 2;
3832std::optional<unsigned>
3835 unsigned UseIdx,
unsigned UseAlign)
const {
3842 UseCycle = RegNo / 2;
3848 UseCycle = (RegNo / 2);
3851 if ((RegNo % 2) || UseAlign < 8)
3862 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
3863 unsigned UseIdx,
unsigned UseAlign)
const {
3873 std::optional<unsigned> DefCycle;
3874 bool LdmBypass =
false;
3881 case ARM::VLDMDIA_UPD:
3882 case ARM::VLDMDDB_UPD:
3884 case ARM::VLDMSIA_UPD:
3885 case ARM::VLDMSDB_UPD:
3886 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3889 case ARM::LDMIA_RET:
3894 case ARM::LDMIA_UPD:
3895 case ARM::LDMDA_UPD:
3896 case ARM::LDMDB_UPD:
3897 case ARM::LDMIB_UPD:
3899 case ARM::tLDMIA_UPD:
3901 case ARM::t2LDMIA_RET:
3904 case ARM::t2LDMIA_UPD:
3905 case ARM::t2LDMDB_UPD:
3907 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3915 std::optional<unsigned> UseCycle;
3922 case ARM::VSTMDIA_UPD:
3923 case ARM::VSTMDDB_UPD:
3925 case ARM::VSTMSIA_UPD:
3926 case ARM::VSTMSDB_UPD:
3927 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3934 case ARM::STMIA_UPD:
3935 case ARM::STMDA_UPD:
3936 case ARM::STMDB_UPD:
3937 case ARM::STMIB_UPD:
3938 case ARM::tSTMIA_UPD:
3943 case ARM::t2STMIA_UPD:
3944 case ARM::t2STMDB_UPD:
3945 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3953 if (UseCycle > *DefCycle + 1)
3954 return std::nullopt;
3956 UseCycle = *DefCycle - *UseCycle + 1;
3957 if (UseCycle > 0u) {
3963 UseCycle = *UseCycle - 1;
3965 UseClass, UseIdx)) {
3966 UseCycle = *UseCycle - 1;
3975 unsigned &DefIdx,
unsigned &Dist) {
3980 assert(
II->isInsideBundle() &&
"Empty bundle?");
3983 while (
II->isInsideBundle()) {
3984 Idx =
II->findRegisterDefOperandIdx(Reg,
TRI,
false,
true);
3991 assert(
Idx != -1 &&
"Cannot find bundled definition!");
3998 unsigned &UseIdx,
unsigned &Dist) {
4002 assert(
II->isInsideBundle() &&
"Empty bundle?");
4007 while (
II != E &&
II->isInsideBundle()) {
4008 Idx =
II->findRegisterUseOperandIdx(Reg,
TRI,
false);
4011 if (
II->getOpcode() != ARM::t2IT)
4039 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4049 case ARM::t2LDRSHs: {
4051 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4052 if (ShAmt == 0 || ShAmt == 2)
4057 }
else if (Subtarget.
isSwift()) {
4064 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4069 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4080 case ARM::t2LDRSHs: {
4082 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4083 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4090 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4097 case ARM::VLD1q8wb_fixed:
4098 case ARM::VLD1q16wb_fixed:
4099 case ARM::VLD1q32wb_fixed:
4100 case ARM::VLD1q64wb_fixed:
4101 case ARM::VLD1q8wb_register:
4102 case ARM::VLD1q16wb_register:
4103 case ARM::VLD1q32wb_register:
4104 case ARM::VLD1q64wb_register:
4111 case ARM::VLD2d8wb_fixed:
4112 case ARM::VLD2d16wb_fixed:
4113 case ARM::VLD2d32wb_fixed:
4114 case ARM::VLD2q8wb_fixed:
4115 case ARM::VLD2q16wb_fixed:
4116 case ARM::VLD2q32wb_fixed:
4117 case ARM::VLD2d8wb_register:
4118 case ARM::VLD2d16wb_register:
4119 case ARM::VLD2d32wb_register:
4120 case ARM::VLD2q8wb_register:
4121 case ARM::VLD2q16wb_register:
4122 case ARM::VLD2q32wb_register:
4127 case ARM::VLD3d8_UPD:
4128 case ARM::VLD3d16_UPD:
4129 case ARM::VLD3d32_UPD:
4130 case ARM::VLD1d64Twb_fixed:
4131 case ARM::VLD1d64Twb_register:
4132 case ARM::VLD3q8_UPD:
4133 case ARM::VLD3q16_UPD:
4134 case ARM::VLD3q32_UPD:
4139 case ARM::VLD4d8_UPD:
4140 case ARM::VLD4d16_UPD:
4141 case ARM::VLD4d32_UPD:
4142 case ARM::VLD1d64Qwb_fixed:
4143 case ARM::VLD1d64Qwb_register:
4144 case ARM::VLD4q8_UPD:
4145 case ARM::VLD4q16_UPD:
4146 case ARM::VLD4q32_UPD:
4147 case ARM::VLD1DUPq8:
4148 case ARM::VLD1DUPq16:
4149 case ARM::VLD1DUPq32:
4150 case ARM::VLD1DUPq8wb_fixed:
4151 case ARM::VLD1DUPq16wb_fixed:
4152 case ARM::VLD1DUPq32wb_fixed:
4153 case ARM::VLD1DUPq8wb_register:
4154 case ARM::VLD1DUPq16wb_register:
4155 case ARM::VLD1DUPq32wb_register:
4156 case ARM::VLD2DUPd8:
4157 case ARM::VLD2DUPd16:
4158 case ARM::VLD2DUPd32:
4159 case ARM::VLD2DUPd8wb_fixed:
4160 case ARM::VLD2DUPd16wb_fixed:
4161 case ARM::VLD2DUPd32wb_fixed:
4162 case ARM::VLD2DUPd8wb_register:
4163 case ARM::VLD2DUPd16wb_register:
4164 case ARM::VLD2DUPd32wb_register:
4165 case ARM::VLD4DUPd8:
4166 case ARM::VLD4DUPd16:
4167 case ARM::VLD4DUPd32:
4168 case ARM::VLD4DUPd8_UPD:
4169 case ARM::VLD4DUPd16_UPD:
4170 case ARM::VLD4DUPd32_UPD:
4172 case ARM::VLD1LNd16:
4173 case ARM::VLD1LNd32:
4174 case ARM::VLD1LNd8_UPD:
4175 case ARM::VLD1LNd16_UPD:
4176 case ARM::VLD1LNd32_UPD:
4178 case ARM::VLD2LNd16:
4179 case ARM::VLD2LNd32:
4180 case ARM::VLD2LNq16:
4181 case ARM::VLD2LNq32:
4182 case ARM::VLD2LNd8_UPD:
4183 case ARM::VLD2LNd16_UPD:
4184 case ARM::VLD2LNd32_UPD:
4185 case ARM::VLD2LNq16_UPD:
4186 case ARM::VLD2LNq32_UPD:
4188 case ARM::VLD4LNd16:
4189 case ARM::VLD4LNd32:
4190 case ARM::VLD4LNq16:
4191 case ARM::VLD4LNq32:
4192 case ARM::VLD4LNd8_UPD:
4193 case ARM::VLD4LNd16_UPD:
4194 case ARM::VLD4LNd32_UPD:
4195 case ARM::VLD4LNq16_UPD:
4196 case ARM::VLD4LNq32_UPD:
4210 if (!ItinData || ItinData->
isEmpty())
4211 return std::nullopt;
4217 unsigned DefAdj = 0;
4218 if (
DefMI.isBundle())
4227 unsigned UseAdj = 0;
4228 if (
UseMI.isBundle()) {
4232 return std::nullopt;
4235 return getOperandLatencyImpl(
4236 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4237 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4240std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4242 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4244 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4245 if (Reg == ARM::CPSR) {
4246 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4248 return Subtarget.
isLikeA9() ? 1 : 20;
4252 if (
UseMI.isBranch())
4271 return std::nullopt;
4273 unsigned DefAlign =
DefMI.hasOneMemOperand()
4276 unsigned UseAlign =
UseMI.hasOneMemOperand()
4282 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4285 return std::nullopt;
4288 int Adj = DefAdj + UseAdj;
4292 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4299std::optional<unsigned>
4301 SDNode *DefNode,
unsigned DefIdx,
4302 SDNode *UseNode,
unsigned UseIdx)
const {
4308 if (isZeroCost(DefMCID.
Opcode))
4311 if (!ItinData || ItinData->
isEmpty())
4312 return DefMCID.
mayLoad() ? 3 : 1;
4315 std::optional<unsigned>
Latency =
4318 int Threshold = 1 + Adj;
4323 auto *DefMN = cast<MachineSDNode>(DefNode);
4324 unsigned DefAlign = !DefMN->memoperands_empty()
4325 ? (*DefMN->memoperands_begin())->
getAlign().value()
4327 auto *UseMN = cast<MachineSDNode>(UseNode);
4328 unsigned UseAlign = !UseMN->memoperands_empty()
4329 ? (*UseMN->memoperands_begin())->
getAlign().value()
4332 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4334 return std::nullopt;
4355 case ARM::t2LDRSHs: {
4358 if (ShAmt == 0 || ShAmt == 2)
4373 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4390 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4397 case ARM::VLD1q8wb_register:
4398 case ARM::VLD1q16wb_register:
4399 case ARM::VLD1q32wb_register:
4400 case ARM::VLD1q64wb_register:
4401 case ARM::VLD1q8wb_fixed:
4402 case ARM::VLD1q16wb_fixed:
4403 case ARM::VLD1q32wb_fixed:
4404 case ARM::VLD1q64wb_fixed:
4408 case ARM::VLD2q8Pseudo:
4409 case ARM::VLD2q16Pseudo:
4410 case ARM::VLD2q32Pseudo:
4411 case ARM::VLD2d8wb_fixed:
4412 case ARM::VLD2d16wb_fixed:
4413 case ARM::VLD2d32wb_fixed:
4414 case ARM::VLD2q8PseudoWB_fixed:
4415 case ARM::VLD2q16PseudoWB_fixed:
4416 case ARM::VLD2q32PseudoWB_fixed:
4417 case ARM::VLD2d8wb_register:
4418 case ARM::VLD2d16wb_register:
4419 case ARM::VLD2d32wb_register:
4420 case ARM::VLD2q8PseudoWB_register:
4421 case ARM::VLD2q16PseudoWB_register:
4422 case ARM::VLD2q32PseudoWB_register:
4423 case ARM::VLD3d8Pseudo:
4424 case ARM::VLD3d16Pseudo:
4425 case ARM::VLD3d32Pseudo:
4426 case ARM::VLD1d8TPseudo:
4427 case ARM::VLD1d16TPseudo:
4428 case ARM::VLD1d32TPseudo:
4429 case ARM::VLD1d64TPseudo:
4430 case ARM::VLD1d64TPseudoWB_fixed:
4431 case ARM::VLD1d64TPseudoWB_register:
4432 case ARM::VLD3d8Pseudo_UPD:
4433 case ARM::VLD3d16Pseudo_UPD:
4434 case ARM::VLD3d32Pseudo_UPD:
4435 case ARM::VLD3q8Pseudo_UPD:
4436 case ARM::VLD3q16Pseudo_UPD:
4437 case ARM::VLD3q32Pseudo_UPD:
4438 case ARM::VLD3q8oddPseudo:
4439 case ARM::VLD3q16oddPseudo:
4440 case ARM::VLD3q32oddPseudo:
4441 case ARM::VLD3q8oddPseudo_UPD:
4442 case ARM::VLD3q16oddPseudo_UPD:
4443 case ARM::VLD3q32oddPseudo_UPD:
4444 case ARM::VLD4d8Pseudo:
4445 case ARM::VLD4d16Pseudo:
4446 case ARM::VLD4d32Pseudo:
4447 case ARM::VLD1d8QPseudo:
4448 case ARM::VLD1d16QPseudo:
4449 case ARM::VLD1d32QPseudo:
4450 case ARM::VLD1d64QPseudo:
4451 case ARM::VLD1d64QPseudoWB_fixed:
4452 case ARM::VLD1d64QPseudoWB_register:
4453 case ARM::VLD1q8HighQPseudo:
4454 case ARM::VLD1q8LowQPseudo_UPD:
4455 case ARM::VLD1q8HighTPseudo:
4456 case ARM::VLD1q8LowTPseudo_UPD:
4457 case ARM::VLD1q16HighQPseudo:
4458 case ARM::VLD1q16LowQPseudo_UPD:
4459 case ARM::VLD1q16HighTPseudo:
4460 case ARM::VLD1q16LowTPseudo_UPD:
4461 case ARM::VLD1q32HighQPseudo:
4462 case ARM::VLD1q32LowQPseudo_UPD:
4463 case ARM::VLD1q32HighTPseudo:
4464 case ARM::VLD1q32LowTPseudo_UPD:
4465 case ARM::VLD1q64HighQPseudo:
4466 case ARM::VLD1q64LowQPseudo_UPD:
4467 case ARM::VLD1q64HighTPseudo:
4468 case ARM::VLD1q64LowTPseudo_UPD:
4469 case ARM::VLD4d8Pseudo_UPD:
4470 case ARM::VLD4d16Pseudo_UPD:
4471 case ARM::VLD4d32Pseudo_UPD:
4472 case ARM::VLD4q8Pseudo_UPD:
4473 case ARM::VLD4q16Pseudo_UPD:
4474 case ARM::VLD4q32Pseudo_UPD:
4475 case ARM::VLD4q8oddPseudo:
4476 case ARM::VLD4q16oddPseudo:
4477 case ARM::VLD4q32oddPseudo:
4478 case ARM::VLD4q8oddPseudo_UPD:
4479 case ARM::VLD4q16oddPseudo_UPD:
4480 case ARM::VLD4q32oddPseudo_UPD:
4481 case ARM::VLD1DUPq8:
4482 case ARM::VLD1DUPq16:
4483 case ARM::VLD1DUPq32:
4484 case ARM::VLD1DUPq8wb_fixed:
4485 case ARM::VLD1DUPq16wb_fixed:
4486 case ARM::VLD1DUPq32wb_fixed:
4487 case ARM::VLD1DUPq8wb_register:
4488 case ARM::VLD1DUPq16wb_register:
4489 case ARM::VLD1DUPq32wb_register:
4490 case ARM::VLD2DUPd8:
4491 case ARM::VLD2DUPd16:
4492 case ARM::VLD2DUPd32:
4493 case ARM::VLD2DUPd8wb_fixed:
4494 case ARM::VLD2DUPd16wb_fixed:
4495 case ARM::VLD2DUPd32wb_fixed:
4496 case ARM::VLD2DUPd8wb_register:
4497 case ARM::VLD2DUPd16wb_register:
4498 case ARM::VLD2DUPd32wb_register:
4499 case ARM::VLD2DUPq8EvenPseudo:
4500 case ARM::VLD2DUPq8OddPseudo:
4501 case ARM::VLD2DUPq16EvenPseudo:
4502 case ARM::VLD2DUPq16OddPseudo:
4503 case ARM::VLD2DUPq32EvenPseudo:
4504 case ARM::VLD2DUPq32OddPseudo:
4505 case ARM::VLD3DUPq8EvenPseudo:
4506 case ARM::VLD3DUPq8OddPseudo:
4507 case ARM::VLD3DUPq16EvenPseudo:
4508 case ARM::VLD3DUPq16OddPseudo:
4509 case ARM::VLD3DUPq32EvenPseudo:
4510 case ARM::VLD3DUPq32OddPseudo:
4511 case ARM::VLD4DUPd8Pseudo:
4512 case ARM::VLD4DUPd16Pseudo:
4513 case ARM::VLD4DUPd32Pseudo:
4514 case ARM::VLD4DUPd8Pseudo_UPD:
4515 case ARM::VLD4DUPd16Pseudo_UPD:
4516 case ARM::VLD4DUPd32Pseudo_UPD:
4517 case ARM::VLD4DUPq8EvenPseudo:
4518 case ARM::VLD4DUPq8OddPseudo:
4519 case ARM::VLD4DUPq16EvenPseudo:
4520 case ARM::VLD4DUPq16OddPseudo:
4521 case ARM::VLD4DUPq32EvenPseudo:
4522 case ARM::VLD4DUPq32OddPseudo:
4523 case ARM::VLD1LNq8Pseudo:
4524 case ARM::VLD1LNq16Pseudo:
4525 case ARM::VLD1LNq32Pseudo:
4526 case ARM::VLD1LNq8Pseudo_UPD:
4527 case ARM::VLD1LNq16Pseudo_UPD:
4528 case ARM::VLD1LNq32Pseudo_UPD:
4529 case ARM::VLD2LNd8Pseudo:
4530 case ARM::VLD2LNd16Pseudo:
4531 case ARM::VLD2LNd32Pseudo:
4532 case ARM::VLD2LNq16Pseudo:
4533 case ARM::VLD2LNq32Pseudo:
4534 case ARM::VLD2LNd8Pseudo_UPD:
4535 case ARM::VLD2LNd16Pseudo_UPD:
4536 case ARM::VLD2LNd32Pseudo_UPD:
4537 case ARM::VLD2LNq16Pseudo_UPD:
4538 case ARM::VLD2LNq32Pseudo_UPD:
4539 case ARM::VLD4LNd8Pseudo:
4540 case ARM::VLD4LNd16Pseudo:
4541 case ARM::VLD4LNd32Pseudo:
4542 case ARM::VLD4LNq16Pseudo:
4543 case ARM::VLD4LNq32Pseudo:
4544 case ARM::VLD4LNd8Pseudo_UPD:
4545 case ARM::VLD4LNd16Pseudo_UPD:
4546 case ARM::VLD4LNd32Pseudo_UPD:
4547 case ARM::VLD4LNq16Pseudo_UPD:
4548 case ARM::VLD4LNq32Pseudo_UPD:
4558unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4559 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4569 !Subtarget.cheapPredicableCPSRDef())) {
4579 unsigned *PredCost)
const {
4580 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4586 if (
MI.isBundle()) {
4590 while (++
I != E &&
I->isInsideBundle()) {
4591 if (
I->getOpcode() != ARM::t2IT)
4592 Latency += getInstrLatency(ItinData, *
I, PredCost);
4599 !Subtarget.cheapPredicableCPSRDef()))) {
4607 return MI.mayLoad() ? 3 : 1;
4620 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->
getAlign().value() : 0;
4622 if (Adj >= 0 || (
int)
Latency > -Adj) {
4630 if (!
Node->isMachineOpcode())
4633 if (!ItinData || ItinData->
isEmpty())
4636 unsigned Opcode =
Node->getMachineOpcode();
4646bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4651 unsigned UseIdx)
const {
4654 if (Subtarget.nonpipelinedVFP() &&
4669 unsigned DefIdx)
const {
4671 if (!ItinData || ItinData->
isEmpty())
4676 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4677 std::optional<unsigned> DefCycle =
4679 return DefCycle && DefCycle <= 2U;
4687 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4690 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4692 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4693 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4694 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4698 if (
MI.getOpcode() == ARM::tPUSH ||
4699 MI.getOpcode() == ARM::tPOP ||
4700 MI.getOpcode() == ARM::tPOP_RET) {
4702 if (MO.isImplicit() || !MO.isReg())
4705 if (Reg < ARM::R0 || Reg > ARM::R7) {
4706 if (!(
MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4707 !(
MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4708 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4714 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4715 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4716 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4717 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4718 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4739 for (
auto Op :
MI.operands()) {
4746 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4756 unsigned LoadImmOpc,
4757 unsigned LoadOpc)
const {
4759 "ROPI/RWPI not currently supported with stack guard");
4767 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4769 "TLS stack protector requires hardware TLS register");
4780 Offset = M.getStackProtectorGuardOffset();
4785 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4795 cast<GlobalValue>((*
MI->memoperands_begin())->getValue());
4804 else if (IsIndirect)
4806 }
else if (IsIndirect) {
4810 if (LoadImmOpc == ARM::tMOVi32imm) {
4813 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
4849 unsigned &AddSubOpc,
4850 bool &NegAcc,
bool &HasLane)
const {
4852 if (
I == MLxEntryMap.
end())
4856 MulOpc = Entry.MulOpc;
4857 AddSubOpc = Entry.AddSubOpc;
4858 NegAcc = Entry.NegAcc;
4859 HasLane = Entry.HasLane;
4883std::pair<uint16_t, uint16_t>
4887 if (Subtarget.hasNEON()) {
4896 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
4897 MI.getOpcode() == ARM::VMOVS))
4904 return std::make_pair(
ExeNEON, 0);
4909 return std::make_pair(
ExeNEON, 0);
4912 return std::make_pair(
ExeVFP, 0);
4918 unsigned SReg,
unsigned &Lane) {
4920 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4927 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4929 assert(DReg &&
"S-register with no D super-register?");
4954 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
4960 ImplicitSReg =
TRI->getSubReg(DReg,
4961 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4963 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
4978 unsigned DstReg, SrcReg;
4983 switch (
MI.getOpcode()) {
4995 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
4998 DstReg =
MI.getOperand(0).getReg();
4999 SrcReg =
MI.getOperand(1).getReg();
5001 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5002 MI.removeOperand(i - 1);
5005 MI.setDesc(
get(ARM::VORRd));
5017 DstReg =
MI.getOperand(0).getReg();
5018 SrcReg =
MI.getOperand(1).getReg();
5020 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5021 MI.removeOperand(i - 1);
5028 MI.setDesc(
get(ARM::VGETLNi32));
5044 DstReg =
MI.getOperand(0).getReg();
5045 SrcReg =
MI.getOperand(1).getReg();
5053 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5054 MI.removeOperand(i - 1);
5058 MI.setDesc(
get(ARM::VSETLNi32));
5077 DstReg =
MI.getOperand(0).getReg();
5078 SrcReg =
MI.getOperand(1).getReg();
5080 unsigned DstLane = 0, SrcLane = 0;
5089 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5090 MI.removeOperand(i - 1);
5095 MI.setDesc(
get(ARM::VDUPLN32d));
5129 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5130 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5133 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5134 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5139 if (SrcLane == DstLane)
5142 MI.setDesc(
get(ARM::VEXTd32));
5147 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5148 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5151 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5152 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5157 if (SrcLane != DstLane)
5163 if (ImplicitSReg != 0)
5190 if (!PartialUpdateClearance)
5201 switch (
MI.getOpcode()) {
5207 case ARM::VMOVv4i16:
5208 case ARM::VMOVv2i32:
5209 case ARM::VMOVv2f32:
5210 case ARM::VMOVv1i64:
5211 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5215 case ARM::VLD1LNd32:
5224 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5228 if (Reg.isVirtual()) {
5230 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5232 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5235 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5236 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5242 return PartialUpdateClearance;
5249 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5254 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5255 unsigned DReg = Reg;
5258 if (ARM::SPRRegClass.
contains(Reg)) {
5259 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5260 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5263 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5264 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5277 MI.addRegisterKilled(DReg,
TRI,
true);
5281 return Subtarget.hasFeature(ARM::HasV6KOps);
5285 if (
MI->getNumOperands() < 4)
5287 unsigned ShOpVal =
MI->getOperand(3).getImm();
5291 ((ShImm == 1 || ShImm == 2) &&
5301 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5302 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5304 switch (
MI.getOpcode()) {
5316 MOReg = &
MI.getOperand(2);
5328 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5329 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5331 switch (
MI.getOpcode()) {
5342 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5351 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5352 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5354 switch (
MI.getOpcode()) {
5355 case ARM::VSETLNi32:
5356 case ARM::MVE_VMOV_to_lane_32:
5364 BaseReg.Reg = MOBaseReg.
getReg();
5367 InsertedReg.
Reg = MOInsertedReg.
getReg();
5375std::pair<unsigned, unsigned>
5378 return std::make_pair(TF & Mask, TF & ~Mask);
5383 using namespace ARMII;
5385 static const std::pair<unsigned, const char *> TargetFlags[] = {
5386 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5387 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5388 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5395 using namespace ARMII;
5397 static const std::pair<unsigned, const char *> TargetFlags[] = {
5398 {MO_COFFSTUB,
"arm-coffstub"},
5399 {MO_GOT,
"arm-got"},
5400 {MO_SBREL,
"arm-sbrel"},
5401 {MO_DLLIMPORT,
"arm-dllimport"},
5402 {MO_SECREL,
"arm-secrel"},
5403 {MO_NONLAZY,
"arm-nonlazy"}};
5407std::optional<RegImmPair>
5410 unsigned Opcode =
MI.getOpcode();
5417 return std::nullopt;
5420 if (Opcode == ARM::SUBri)
5422 else if (Opcode != ARM::ADDri)
5423 return std::nullopt;
5428 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5429 return std::nullopt;
5431 Offset =
MI.getOperand(2).getImm() * Sign;
5439 for (
auto I =
From;
I != To; ++
I)
5440 if (
I->modifiesRegister(Reg,
TRI))
5453 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5455 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5461 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5463 Register Reg = CmpMI->getOperand(0).getReg();
5466 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5479 if (Subtarget->isThumb()) {
5481 return ForCodesize ? 2 : 1;
5482 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5485 return ForCodesize ? 4 : 1;
5487 return ForCodesize ? 4 : 2;
5489 return ForCodesize ? 4 : 2;
5491 return ForCodesize ? 4 : 2;
5494 return ForCodesize ? 4 : 1;
5496 return ForCodesize ? 4 : 1;
5497 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5498 return ForCodesize ? 4 : 1;
5500 return ForCodesize ? 8 : 2;
5502 return ForCodesize ? 8 : 2;
5505 return ForCodesize ? 8 : 2;
5506 return ForCodesize ? 8 : 3;
5655 : CallTailCall(target.
isThumb() ? 4 : 4),
5656 FrameTailCall(target.
isThumb() ? 0 : 0),
5657 CallThunk(target.
isThumb() ? 4 : 4),
5658 FrameThunk(target.
isThumb() ? 0 : 0),
5659 CallNoLRSave(target.
isThumb() ? 4 : 4),
5660 FrameNoLRSave(target.
isThumb() ? 2 : 4),
5661 CallRegSave(target.
isThumb() ? 8 : 12),
5662 FrameRegSave(target.
isThumb() ? 2 : 4),
5663 CallDefault(target.
isThumb() ? 8 : 12),
5664 FrameDefault(target.
isThumb() ? 2 : 4),
5665 SaveRestoreLROnStack(target.
isThumb() ? 8 : 8) {}
5678 for (
Register Reg : ARM::rGPRRegClass) {
5679 if (!(Reg < regsReserved.
size() && regsReserved.
test(Reg)) &&
5682 C.isAvailableAcrossAndOutOfSeq(Reg,
TRI) &&
5683 C.isAvailableInsideSeq(Reg,
TRI))
5697 for (;
I != E; ++
I) {
5701 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5705 unsigned Opcode =
MI.getOpcode();
5706 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5707 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5708 Opcode == ARM::tBXNS_RET) {
5714 if (
MI.readsRegister(ARM::LR, &
TRI))
5720std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5723 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5724 unsigned MinRepeats)
const {
5725 unsigned SequenceSize = 0;
5726 for (
auto &
MI : RepeatedSequenceLocs[0])
5730 unsigned FlagsSetInAll = 0xF;
5735 FlagsSetInAll &=
C.Flags;
5754 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5762 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5765 if (RepeatedSequenceLocs.size() < MinRepeats)
5766 return std::nullopt;
5785 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5786 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5787 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5789 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5791 if (RepeatedSequenceLocs.size() < MinRepeats)
5792 return std::nullopt;
5802 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5803 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5804 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5806 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5808 if (RepeatedSequenceLocs.size() < MinRepeats)
5809 return std::nullopt;
5814 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5817 auto SetCandidateCallInfo =
5818 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5820 C.setCallInfo(CallID, NumBytesForCall);
5825 const auto &SomeMFI =
5828 if (SomeMFI.branchTargetEnforcement()) {
5837 if (SomeMFI.shouldSignReturnAddress(
true)) {
5847 if (RepeatedSequenceLocs[0].back().isTerminator()) {
5851 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5852 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5853 LastInstrOpcode == ARM::tBLXr ||
5854 LastInstrOpcode == ARM::tBLXr_noip ||
5855 LastInstrOpcode == ARM::tBLXi) {
5863 unsigned NumBytesNoStackCalls = 0;
5864 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5869 const auto Last =
C.getMBB()->rbegin();
5870 const bool LRIsAvailable =
5871 C.getMBB()->isReturnBlock() && !
Last->isCall()
5874 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
5875 if (LRIsAvailable) {
5879 CandidatesWithoutStackFixups.push_back(
C);
5884 else if (findRegisterToSaveLRTo(
C)) {
5888 CandidatesWithoutStackFixups.push_back(
C);
5893 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
5896 CandidatesWithoutStackFixups.push_back(
C);
5902 NumBytesNoStackCalls += SequenceSize;
5908 if (NumBytesNoStackCalls <=
5909 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
5910 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5912 if (RepeatedSequenceLocs.size() < MinRepeats)
5913 return std::nullopt;
5920 if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
5938 return std::make_unique<outliner::OutlinedFunction>(
5939 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
5942bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
5945 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
5970 unsigned NumOps =
MI->getDesc().getNumOperands();
5971 unsigned ImmIdx = NumOps - 3;
5975 int64_t OffVal =
Offset.getImm();
5981 unsigned NumBits = 0;
6010 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6030 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6031 "Can't encode this offset!");
6032 OffVal +=
Fixup / Scale;
6034 unsigned Mask = (1 << NumBits) - 1;
6036 if (OffVal <= Mask) {
6038 MI->getOperand(ImmIdx).setImm(OffVal);
6046 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6050 const Function &CFn =
C.getMF()->getFunction();
6057 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6065 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6084 unsigned &Flags)
const {
6088 "Suitable Machine Function for outlining must track liveness");
6096 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6097 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6101 if (R12AvailableInBlock && CPSRAvailableInBlock)
6102 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6109 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6111 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6117 Flags |= MachineOutlinerMBBFlags::HasCalls;
6121 bool LRIsAvailable =
6126 Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6134 unsigned Flags)
const {
6140 unsigned Opc =
MI.getOpcode();
6141 if (
Opc == ARM::tPICADD ||
Opc == ARM::PICADD ||
Opc == ARM::PICSTR ||
6142 Opc == ARM::PICSTRB ||
Opc == ARM::PICSTRH ||
Opc == ARM::PICLDR ||
6143 Opc == ARM::PICLDRB ||
Opc == ARM::PICLDRH ||
Opc == ARM::PICLDRSB ||
6144 Opc == ARM::PICLDRSH ||
Opc == ARM::t2LDRpci_pic ||
6145 Opc == ARM::t2MOVi16_ga_pcrel ||
Opc == ARM::t2MOVTi16_ga_pcrel ||
6146 Opc == ARM::t2MOV_ga_pcrel)
6150 if (
Opc == ARM::t2BF_LabelPseudo ||
Opc == ARM::t2DoLoopStart ||
6151 Opc == ARM::t2DoLoopStartTP ||
Opc == ARM::t2WhileLoopStart ||
6152 Opc == ARM::t2WhileLoopStartLR ||
Opc == ARM::t2WhileLoopStartTP ||
6153 Opc == ARM::t2LoopDec ||
Opc == ARM::t2LoopEnd ||
6154 Opc == ARM::t2LoopEndDec)
6163 if (
MI.isTerminator())
6169 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6177 if (MOP.isGlobal()) {
6178 Callee = dyn_cast<Function>(MOP.getGlobal());
6186 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6187 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6195 if (
Opc == ARM::BL ||
Opc == ARM::tBL ||
Opc == ARM::BLX ||
6196 Opc == ARM::BLX_noip ||
Opc == ARM::tBLXr ||
Opc == ARM::tBLXr_noip ||
6201 return UnknownCallOutlineType;
6209 return UnknownCallOutlineType;
6217 return UnknownCallOutlineType;
6225 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6229 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6242 bool MightNeedStackFixUp =
6243 (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6244 MachineOutlinerMBBFlags::HasCalls));
6246 if (!MightNeedStackFixUp)
6252 if (
MI.modifiesRegister(ARM::SP,
TRI))
6266 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6267 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6271 if (
MI.isCFIInstruction())
6302 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6316 CFIBuilder.buildDefCFAOffset(
Align);
6321 CFIBuilder.buildOffset(ARM::LR, -LROffset);
6324 CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -
Align);
6330 bool CFI,
bool Auth)
const {
6346 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6350 if (!Subtarget.isThumb())
6360 CFIBuilder.buildDefCFAOffset(0);
6361 CFIBuilder.buildRestore(ARM::LR);
6363 CFIBuilder.buildUndefined(ARM::RA_AUTH_CODE);
6377 bool isThumb = Subtarget.isThumb();
6378 unsigned FuncOp =
isThumb ? 2 : 0;
6379 unsigned Opc = Call->getOperand(FuncOp).isReg()
6380 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6385 .
add(Call->getOperand(FuncOp));
6386 if (
isThumb && !Call->getOperand(FuncOp).isReg())
6388 Call->eraseFromParent();
6393 return MI.isCall() && !
MI.isReturn();
6401 Et = std::prev(
MBB.
end());
6411 saveLROnStack(
MBB, It,
true, Auth);
6416 "Can only fix up stack references once");
6417 fixupPostOutline(
MBB);
6420 restoreLRFromStack(
MBB, Et,
true, Auth);
6440 fixupPostOutline(
MBB);
6449 bool isThumb = Subtarget.isThumb();
6455 ? Subtarget.
isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6482 Register Reg = findRegisterToSaveLRTo(
C);
6483 assert(Reg != 0 &&
"No callee-saved register available?");
6513bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(
6547 static int constexpr MAX_STAGES = 30;
6548 static int constexpr LAST_IS_USE = MAX_STAGES;
6549 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6550 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6551 typedef std::map<unsigned, IterNeed> IterNeeds;
6554 const IterNeeds &CIN);
6566 : EndLoop(EndLoop), LoopCount(LoopCount),
6568 TII(MF->getSubtarget().getInstrInfo()) {}
6570 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6572 return MI == EndLoop ||
MI == LoopCount;
6576 if (tooMuchRegisterPressure(SSD, SMS))
6582 std::optional<bool> createTripCountGreaterCondition(
6593 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6598 if (
I.getOpcode() == ARM::t2LoopDec)
6600 assert(LoopDec &&
"Unable to find copied LoopDec");
6606 .
addReg(ARM::NoRegister);
6616 void adjustTripCount(
int TripCountAdjust)
override {}
6620 const IterNeeds &CIN) {
6622 for (
const auto &
N : CIN) {
6623 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6624 for (
int I = 0;
I < Cnt; ++
I)
6629 for (
const auto &
N : CIN) {
6630 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6631 for (
int I = 0;
I < Cnt; ++
I)
6639 IterNeeds CrossIterationNeeds;
6644 for (
auto &SU : SSD.
SUnits) {
6647 for (
auto &S : SU.Succs)
6650 if (
Reg.isVirtual())
6651 CrossIterationNeeds[
Reg.id()].set(0);
6652 }
else if (S.isAssignedRegDep()) {
6654 if (OStg >= 0 && OStg != Stg) {
6656 if (
Reg.isVirtual())
6657 CrossIterationNeeds[
Reg.id()] |= ((1 << (OStg - Stg)) - 1);
6666 std::vector<SUnit *> ProposedSchedule;
6670 std::deque<SUnit *> Instrs =
6672 std::sort(Instrs.begin(), Instrs.end(),
6673 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6680 for (
auto *SU : ProposedSchedule)
6684 if (!MO.isReg() || !MO.getReg())
6687 auto CIter = CrossIterationNeeds.find(
Reg.id());
6688 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6689 CIter->second[SEEN_AS_LIVE])
6691 if (MO.isDef() && !MO.isDead())
6692 CIter->second.set(SEEN_AS_LIVE);
6693 else if (MO.isUse())
6694 CIter->second.set(LAST_IS_USE);
6696 for (
auto &CI : CrossIterationNeeds)
6697 CI.second.reset(LAST_IS_USE);
6703 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6706 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6708 for (
auto *SU : ProposedSchedule) {
6710 RPTracker.setPos(std::next(CurInstI));
6716 if (!MO.isReg() || !MO.getReg())
6719 if (MO.isDef() && !MO.isDead()) {
6720 auto CIter = CrossIterationNeeds.find(
Reg.id());
6721 if (CIter != CrossIterationNeeds.end()) {
6722 CIter->second.reset(0);
6723 CIter->second.reset(SEEN_AS_LIVE);
6727 for (
auto &S : SU->Preds) {
6729 if (S.isAssignedRegDep()) {
6731 auto CIter = CrossIterationNeeds.find(
Reg.id());
6732 if (CIter != CrossIterationNeeds.end()) {
6734 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6735 if (Stg - Stg2 < MAX_STAGES)
6736 CIter->second.set(Stg - Stg2);
6737 CIter->second.set(SEEN_AS_LIVE);
6742 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6745 auto &
P = RPTracker.getPressure().MaxSetPressure;
6746 for (
unsigned I = 0, E =
P.size();
I < E; ++
I) {
6748 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6749 I == ARM::DTriple_with_qsub_0_in_QPR)
6761std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6765 if (Preheader == LoopBB)
6766 Preheader = *std::next(LoopBB->
pred_begin());
6768 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6774 for (
auto &L : LoopBB->
instrs()) {
6781 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
6795 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
6796 for (
auto &L : LoopBB->
instrs())
6801 Register LoopDecResult =
I->getOperand(0).getReg();
6804 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
6807 for (
auto &J : Preheader->
instrs())
6808 if (J.getOpcode() == ARM::t2DoLoopStart)
6812 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
DXIL Forward Handle Accesses
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is Live
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State, const TargetRegisterInfo *TRI) const
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
ARMBaseInstrInfo(const ARMSubtarget &STI)
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
ARMConstantPoolConstant - ARM-specific constant pool values for Constants, Functions,...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic block.
ARMConstantPoolSymbol - ARM-specific constantpool values for external symbols.
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
bool isTargetMachO() const
ARMLdStMultipleTiming getLdStMultipleTiming() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool isReadTPSoft() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned getMispredictionPenalty() const
unsigned getReturnOpcode() const
Returns the correct return opcode for the current feature set.
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
bool isTargetCOFF() const
unsigned getPartialUpdateClearance() const
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
int getPreISelOperandLatencyAdjustment() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
Helper class for creating CFI instructions and inserting them into MIR.
void buildRegister(MCRegister Reg1, MCRegister Reg2) const
void buildRestore(MCRegister Reg) const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A possibly irreducible generalization of a Loop.
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
LLVM_ABI void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
unsigned getOpcode() const
Return the opcode number for this descriptor.
LLVM_ABI bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
instr_iterator instr_begin()
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
LLVM_ABI iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
instr_iterator instr_end()
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@205 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
LLVM_ABI void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCodes getOppositeCondition(CondCodes CC)
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting an 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
MaybeAlign getAlign(const CallInst &I, unsigned Index)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
unsigned getBLXpredOpcode(const MachineFunction &MF)
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
unsigned getUndefRegState(bool B)
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
unsigned getKillRegState(bool B)
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.