71#define DEBUG_TYPE "machine-sink"
75 cl::desc(
"Split critical edges during machine sinking"),
80 cl::desc(
"Use block frequency info to find successors to sink"),
84 "machine-sink-split-probability-threshold",
86 "Percentage threshold for splitting single-instruction critical edge. "
87 "If the branch threshold is higher than this threshold, we allow "
88 "speculative execution of up to 1 instruction to avoid branching to "
89 "splitted critical edge"),
93 "machine-sink-load-instrs-threshold",
94 cl::desc(
"Do not try to find alias store for a load if there is a in-path "
95 "block whose instruction number is higher than this threshold."),
99 "machine-sink-load-blocks-threshold",
100 cl::desc(
"Do not try to find alias store for a load if the block number in "
101 "the straight line is higher than this threshold."),
106 cl::desc(
"Sink instructions into cycles to avoid "
111 "machine-sink-cycle-limit",
113 "The maximum number of instructions considered for cycle sinking."),
116STATISTIC(NumSunk,
"Number of machine instructions sunk");
117STATISTIC(NumCycleSunk,
"Number of machine instructions sunk into a cycle");
120STATISTIC(NumPostRACopySink,
"Number of copies sunk after RA");
126class MachineSinking {
164 using AllSuccsCache =
178 using SinkItem = std::pair<MachineInstr *, MachineBasicBlock *>;
197 CachedRegisterPressure;
199 bool EnableSinkAndFold;
208 : DT(DT), PDT(PDT), CI(CI), PSI(PSI), MBFI(MBFI), MBPI(MBPI), AA(AA),
209 LIS(LIS), SI(SI), LV(LV), MLI(MLI),
210 EnableSinkAndFold(EnableSinkAndFold) {}
214 void releaseMemory() {
215 CEBCandidates.
clear();
216 CEMergeCandidates.
clear();
246 AllSuccsCache &AllSuccessors);
256 bool &LocalUse)
const;
259 AllSuccsCache &AllSuccessors);
271 AllSuccsCache &AllSuccessors);
280 AllSuccsCache &AllSuccessors)
const;
283 bool UseCache =
true);
285 bool registerPressureSetExceedsLimit(
unsigned NRegs,
320char MachineSinkingLegacy::ID = 0;
345 if (!
TII->isBasicBlockPrologue(*PI))
347 for (
auto &MO :
MI.operands()) {
355 (
TII->isIgnorableUse(MO) || (
MRI &&
MRI->isConstantPhysReg(
Reg))))
357 if (PI->modifiesRegister(
Reg,
TRI))
360 if (PI->readsRegister(
Reg,
TRI))
363 auto *DefOp = PI->findRegisterDefOperand(
Reg,
TRI,
false,
true);
364 if (DefOp && !DefOp->isDead())
373bool MachineSinking::PerformTrivialForwardCoalescing(
MachineInstr &
MI,
381 !
MRI->hasOneNonDBGUse(SrcReg))
394 MRI->replaceRegWith(DstReg, SrcReg);
395 MI.eraseFromParent();
399 MRI->clearKillFlags(SrcReg);
407 if (
MI.isCopy() ||
MI.mayLoadOrStore() ||
408 MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
416 bool SawStore =
true;
417 if (!
MI.isSafeToMove(SawStore))
422 if (
MI.isConvergent())
432 if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
433 MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
434 MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
453 else if (UsedRegB == 0)
461 (
MRI->isConstantPhysReg(
Reg) ||
TII->isIgnorableUse(MO)))
471 using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
477 UsedRegA == 0 ? nullptr :
MRI->getRegClass(UsedRegA);
479 UsedRegB == 0 ? nullptr :
MRI->getRegClass(UsedRegB);
482 while (!Worklist.
empty()) {
508 if (!
TII->canFoldIntoAddrMode(UseInst,
Reg,
MI, AM))
525 if (RCA ==
nullptr) {
530 unsigned NRegs = !!RCA + !!RCB;
536 if (RCB ==
nullptr) {
537 if (registerPressureSetExceedsLimit(NRegs, RCA,
MBB))
539 }
else if (registerPressureSetExceedsLimit(1, RCA,
MBB) ||
540 registerPressureSetExceedsLimit(1, RCB,
MBB)) {
550 if (SinkInto.
empty())
554 for (
auto &[SinkDst, MaybeAM] : SinkInto) {
558 if (SinkDst->isCopy()) {
571 Register DstReg = SinkDst->getOperand(0).getReg();
572 TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0,
MI, *
TRI);
573 New = &*std::prev(InsertPt);
574 if (!
New->getDebugLoc())
575 New->setDebugLoc(SinkDst->getDebugLoc());
581 MRI->clearKillFlags(UsedRegA);
583 MRI->clearKillFlags(UsedRegB);
586 New =
TII->emitLdStWithAddr(*SinkDst, MaybeAM);
592 MRI->clearKillFlags(R);
594 MRI->clearKillFlags(R);
598 if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
599 StoreInstrCache.clear();
600 SinkDst->eraseFromParent();
608 while (!Worklist.
empty()) {
612 assert((
U->isCopy() ||
U->isDebugInstr()) &&
613 "Only debug uses and copies must remain");
615 Worklist.
push_back(
U->getOperand(0).getReg());
624 I->eraseFromParent();
631 MI.eraseFromParent();
639bool MachineSinking::AllUsesDominatedByBlock(
Register Reg,
643 bool &LocalUse)
const {
647 if (
MRI->use_nodbg_empty(
Reg))
665 MachineInstr *UseInst = MO.getParent();
666 unsigned OpNo = MO.getOperandNo();
667 MachineBasicBlock *UseBlock = UseInst->getParent();
668 return UseBlock == MBB && UseInst->isPHI() &&
669 UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
678 unsigned OpNo = &MO - &UseInst->
getOperand(0);
680 if (UseInst->
isPHI()) {
684 }
else if (UseBlock == DefMBB) {
700 assert(
MI.mayLoad() &&
"Expected MI that loads!");
704 if (
MI.memoperands_empty())
709 if (PSV->isGOT() || PSV->isConstantPool())
715void MachineSinking::FindCycleSinkCandidates(
718 for (
auto &
MI : *BB) {
720 if (
MI.isMetaInstruction()) {
721 LLVM_DEBUG(
dbgs() <<
"CycleSink: not sinking meta instruction\n");
725 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction not a candidate for this "
730 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction is not cycle invariant\n");
733 bool DontMoveAcrossStore =
true;
734 if (!
MI.isSafeToMove(DontMoveAcrossStore)) {
735 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction not safe to move.\n");
739 LLVM_DEBUG(
dbgs() <<
"CycleSink: Dont sink GOT or constant pool loads\n");
742 if (
MI.isConvergent())
751 LLVM_DEBUG(
dbgs() <<
"CycleSink: Instruction added as candidate.\n");
763 .getCachedResult<ProfileSummaryAnalysis>(
776 MachineSinking Impl(EnableSinkAndFold, DT, PDT, LV, MLI, SI, LIS, CI, PSI,
778 bool Changed = Impl.run(MF);
789 OS << MapClassName2PassName(
name());
790 if (EnableSinkAndFold)
791 OS <<
"<enable-sink-fold>";
801 auto *DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
803 &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
804 auto *CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
805 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
808 ? &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI()
811 &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
812 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
814 auto *LISWrapper = getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
815 auto *LIS = LISWrapper ? &LISWrapper->getLIS() :
nullptr;
816 auto *SIWrapper = getAnalysisIfAvailable<SlotIndexesWrapperPass>();
817 auto *SI = SIWrapper ? &SIWrapper->getSI() :
nullptr;
818 auto *LVWrapper = getAnalysisIfAvailable<LiveVariablesWrapperPass>();
819 auto *LV = LVWrapper ? &LVWrapper->getLV() :
nullptr;
820 auto *MLIWrapper = getAnalysisIfAvailable<MachineLoopInfoWrapperPass>();
821 auto *MLI = MLIWrapper ? &MLIWrapper->getLI() :
nullptr;
823 MachineSinking Impl(EnableSinkAndFold, DT, PDT, LV, MLI, SI, LIS, CI, PSI,
832 TII = STI->getInstrInfo();
833 TRI = STI->getRegisterInfo();
836 RegClassInfo.runOnMachineFunction(MF);
838 bool EverMadeChange =
false;
841 bool MadeChange =
false;
844 CEBCandidates.clear();
845 CEMergeCandidates.clear();
852 MachineDomTreeUpdater::UpdateStrategy::Lazy);
853 for (
const auto &Pair : ToSplit) {
854 auto NewSucc = Pair.first->SplitCriticalEdge(
855 Pair.second, {LIS, SI, LV, MLI},
nullptr, &MDTU);
856 if (NewSucc !=
nullptr) {
862 MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);
866 CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
873 EverMadeChange =
true;
878 SchedModel.init(STI);
879 bool HasHighPressure;
883 enum CycleSinkStage { COPY, LOW_LATENCY, AGGRESSIVE, END };
884 for (
unsigned Stage = CycleSinkStage::COPY; Stage != CycleSinkStage::END;
885 ++Stage, SunkInstrs.
clear()) {
886 HasHighPressure =
false;
888 for (
auto *
Cycle : Cycles) {
895 FindCycleSinkCandidates(
Cycle, Preheader, Candidates);
904 if (Stage == CycleSinkStage::COPY) {
907 <<
"CycleSink: Limit reached of instructions to "
918 if (Stage == CycleSinkStage::LOW_LATENCY &&
919 !
TII->hasLowDefLatency(SchedModel, *
I, 0))
922 if (!aggressivelySinkIntoCycle(
Cycle, *
I, SunkInstrs))
924 EverMadeChange =
true;
929 if (!HasHighPressure)
930 HasHighPressure = registerPressureExceedsLimit(*Preheader);
932 if (!HasHighPressure)
937 HasStoreCache.clear();
938 StoreInstrCache.clear();
941 for (
auto I : RegsToClearKillFlags)
942 MRI->clearKillFlags(
I);
943 RegsToClearKillFlags.clear();
946 return EverMadeChange;
959 bool MadeChange =
false;
962 AllSuccsCache AllSuccessors;
967 bool ProcessedBegin, SawStore =
false;
977 if (
MI.isDebugOrPseudoInstr() ||
MI.isFakeUse()) {
978 if (
MI.isDebugValue())
983 if (EnableSinkAndFold && PerformSinkAndFold(
MI, &
MBB)) {
992 if (PerformTrivialForwardCoalescing(
MI, &
MBB)) {
1003 }
while (!ProcessedBegin);
1005 SeenDbgUsers.clear();
1006 SeenDbgVars.clear();
1008 CachedRegisterPressure.clear();
1015 assert(
MI.isDebugValue() &&
"Expected DBG_VALUE for processing");
1018 MI.getDebugLoc()->getInlinedAt());
1019 bool SeenBefore = SeenDbgVars.contains(Var);
1023 SeenDbgUsers[MO.
getReg()].push_back(SeenDbgUser(&
MI, SeenBefore));
1027 SeenDbgVars.insert(Var);
1030bool MachineSinking::isWorthBreakingCriticalEdge(
1038 if (!CEBCandidates.insert(std::make_pair(
From, To)).second)
1049 for (
const auto &MO :
MI.all_defs()) {
1054 auto Key = std::make_pair(SrcReg, To);
1055 auto Res = CEMergeCandidates.try_emplace(Key,
From);
1060 DeferredFromBlock = Res.first->second;
1065 if (
From->isSuccessor(To) &&
1066 MBPI->getEdgeProbability(
From, To) <=
1080 if (
Reg.isPhysical())
1086 if (
MRI->hasOneNonDBGUse(Reg)) {
1099 return TII->shouldBreakCriticalEdgeToSink(
MI);
1105 bool BreakPHIEdge) {
1114 if (FromCycle == ToCycle && FromCycle &&
1157 if (!BreakPHIEdge) {
1159 if (Pred != FromBB && !DT->
dominates(ToBB, Pred))
1169 bool BreakPHIEdge) {
1172 if (isWorthBreakingCriticalEdge(
MI, FromBB, ToBB, DeferredFromBB)) {
1175 if ((!DeferredFromBB ||
1176 ToSplit.count(std::make_pair(DeferredFromBB, ToBB)) ||
1177 isLegalToBreakCriticalEdge(
MI, DeferredFromBB, ToBB, BreakPHIEdge)) &&
1178 isLegalToBreakCriticalEdge(
MI, FromBB, ToBB, BreakPHIEdge)) {
1179 ToSplit.
insert(std::make_pair(FromBB, ToBB));
1181 ToSplit.insert(std::make_pair(DeferredFromBB, ToBB));
1189std::vector<unsigned> &
1197 auto RP = CachedRegisterPressure.find(&
MBB);
1198 if (UseCache && RP != CachedRegisterPressure.end())
1210 MII != MIE; --MII) {
1212 if (
MI.isDebugInstr() ||
MI.isPseudoProbe())
1216 RPTracker.recedeSkipDebugValues();
1217 assert(&*RPTracker.getPos() == &
MI &&
"RPTracker sync error!");
1218 RPTracker.recede(RegOpers);
1221 RPTracker.closeRegion();
1223 if (RP != CachedRegisterPressure.end()) {
1224 CachedRegisterPressure[&
MBB] = RPTracker.getPressure().MaxSetPressure;
1225 return CachedRegisterPressure[&
MBB];
1228 auto It = CachedRegisterPressure.insert(
1229 std::make_pair(&
MBB, RPTracker.getPressure().MaxSetPressure));
1230 return It.first->second;
1233bool MachineSinking::registerPressureSetExceedsLimit(
1236 unsigned Weight = NRegs *
TRI->getRegClassWeight(RC).RegWeight;
1237 const int *PS =
TRI->getRegClassPressureSets(RC);
1238 std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(
MBB);
1239 for (; *PS != -1; PS++)
1240 if (Weight + BBRegisterPressure[*PS] >=
1241 RegClassInfo.getRegPressureSetLimit(*PS))
1247bool MachineSinking::registerPressureExceedsLimit(
1249 std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(
MBB,
false);
1251 for (
unsigned PS = 0; PS < BBRegisterPressure.size(); ++PS) {
1252 if (BBRegisterPressure[PS] >=
1265 AllSuccsCache &AllSuccessors) {
1266 assert(SuccToSinkTo &&
"Invalid SinkTo Candidate BB");
1268 if (
MBB == SuccToSinkTo)
1272 if (!PDT->dominates(SuccToSinkTo,
MBB))
1277 if (CI->getCycleDepth(
MBB) > CI->getCycleDepth(SuccToSinkTo))
1281 bool NonPHIUse =
false;
1284 if (UseBlock == SuccToSinkTo && !UseInst.
isPHI())
1292 bool BreakPHIEdge =
false;
1295 FindSuccToSinkTo(
MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
1296 return isProfitableToSinkTo(Reg,
MI, SuccToSinkTo, MBB2, AllSuccessors);
1315 if (
Reg.isPhysical()) {
1317 if (MO.
isUse() && !
MRI->isConstantPhysReg(Reg) &&
1318 !
TII->isIgnorableUse(MO))
1326 bool LocalUse =
false;
1327 if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo,
MBB, BreakPHIEdge,
1345 if (registerPressureSetExceedsLimit(1,
MRI->getRegClass(Reg),
1347 LLVM_DEBUG(
dbgs() <<
"register pressure exceed limit, not profitable.");
1363 AllSuccsCache &AllSuccessors)
const {
1365 auto Succs = AllSuccessors.find(
MBB);
1366 if (Succs != AllSuccessors.end())
1367 return Succs->second;
1380 if (DTChild->getIDom()->getBlock() ==
MI.getParent() &&
1383 AllSuccs.push_back(DTChild->getBlock());
1389 uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
1390 uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
1392 (!LHSFreq && !RHSFreq))
1393 return CI->getCycleDepth(L) < CI->getCycleDepth(R);
1394 return LHSFreq < RHSFreq;
1397 auto it = AllSuccessors.insert(std::make_pair(
MBB, AllSuccs));
1399 return it.first->second;
1406 AllSuccsCache &AllSuccessors) {
1407 assert(
MBB &&
"Invalid MachineBasicBlock!");
1423 if (
Reg.isPhysical()) {
1428 if (!
MRI->isConstantPhysReg(Reg) && !
TII->isIgnorableUse(MO))
1430 }
else if (!MO.
isDead()) {
1440 if (!
TII->isSafeToMoveRegClassDefs(
MRI->getRegClass(Reg)))
1448 bool LocalUse =
false;
1449 if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo,
MBB, BreakPHIEdge,
1461 GetAllSortedSuccessors(
MI,
MBB, AllSuccessors)) {
1462 bool LocalUse =
false;
1463 if (AllUsesDominatedByBlock(Reg, SuccBlock,
MBB, BreakPHIEdge,
1465 SuccToSinkTo = SuccBlock;
1476 if (!isProfitableToSinkTo(Reg,
MI,
MBB, SuccToSinkTo, AllSuccessors))
1483 if (
MBB == SuccToSinkTo)
1488 if (SuccToSinkTo && SuccToSinkTo->
isEHPad())
1498 if (SuccToSinkTo && !
TII->isSafeToSink(
MI, SuccToSinkTo, CI))
1501 return SuccToSinkTo;
1516 auto *
MBB =
MI.getParent();
1521 auto *PredBB = PredMBB->getBasicBlock();
1527 !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
1532 bool OffsetIsScalable;
1533 if (!
TII->getMemOperandWithOffset(
MI, BaseOp,
Offset, OffsetIsScalable,
TRI))
1536 if (!BaseOp->
isReg())
1539 if (!(
MI.mayLoad() && !
MI.isPredicable()))
1542 MachineBranchPredicate MBP;
1543 if (
TII->analyzeBranchPredicate(*PredMBB, MBP,
false))
1546 return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
1547 (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
1548 MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
1549 MBP.LHS.getReg() == BaseOp->
getReg();
1566 auto CopyOperands =
TII.isCopyInstr(SinkInst);
1569 SrcMO = CopyOperands->Source;
1570 DstMO = CopyOperands->Destination;
1573 bool PostRA =
MRI.getNumVirtRegs() == 0;
1581 bool arePhysRegs = !Reg.isVirtual();
1582 if (arePhysRegs != PostRA)
1589 if (DbgMO.getSubReg() != SrcMO->
getSubReg() ||
1590 DbgMO.getSubReg() != DstMO->getSubReg())
1596 if (PostRA && Reg != DstMO->getReg())
1600 DbgMO.setReg(SrcMO->
getReg());
1606using MIRegs = std::pair<MachineInstr *, SmallVector<Register, 2>>;
1614 if (!SuccToSinkTo.
empty() && InsertPos != SuccToSinkTo.
end())
1616 InsertPos->getDebugLoc()));
1622 SuccToSinkTo.
splice(InsertPos, ParentBlock,
MI,
1629 for (
const auto &DbgValueToSink : DbgValuesToSink) {
1632 SuccToSinkTo.
insert(InsertPos, NewDbgMI);
1634 bool PropagatedAllSunkOps =
true;
1635 for (
Register Reg : DbgValueToSink.second) {
1638 PropagatedAllSunkOps =
false;
1643 if (!PropagatedAllSunkOps)
1657 auto BlockPair = std::make_pair(
From, To);
1661 if (
auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
1664 if (
auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
1666 return I->mayAlias(AA, MI, false);
1669 bool SawStore =
false;
1670 bool HasAliasedStore =
false;
1679 if (BB == To || BB ==
From)
1683 if (HandledBlocks.
count(BB))
1686 HandledBlocks.
insert(BB);
1688 if (PDT->dominates(To, BB)) {
1689 if (!HandledDomBlocks.
count(BB))
1690 HandledDomBlocks.
insert(BB);
1696 for (
auto *DomBB : HandledDomBlocks) {
1697 if (DomBB != BB && DT->
dominates(DomBB, BB))
1698 HasStoreCache[std::make_pair(DomBB, To)] =
true;
1699 else if (DomBB != BB && DT->
dominates(BB, DomBB))
1700 HasStoreCache[std::make_pair(
From, DomBB)] =
true;
1702 HasStoreCache[BlockPair] =
true;
1709 if (
I.isCall() ||
I.hasOrderedMemoryRef()) {
1710 for (
auto *DomBB : HandledDomBlocks) {
1711 if (DomBB != BB && DT->
dominates(DomBB, BB))
1712 HasStoreCache[std::make_pair(DomBB, To)] =
true;
1713 else if (DomBB != BB && DT->
dominates(BB, DomBB))
1714 HasStoreCache[std::make_pair(
From, DomBB)] =
true;
1716 HasStoreCache[BlockPair] =
true;
1726 if (
I.mayAlias(AA,
MI,
false))
1727 HasAliasedStore =
true;
1728 StoreInstrCache[BlockPair].push_back(&
I);
1735 HasStoreCache[BlockPair] =
false;
1736 return HasAliasedStore;
1744bool MachineSinking::aggressivelySinkIntoCycle(
1748 if (
I.getNumDefs() > 1)
1751 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Finding sink block for: " <<
I);
1760 for (std::pair<RegSubRegPair, MachineInstr *> Entry :
Uses) {
1765 dbgs() <<
"AggressiveCycleSink: Not attempting to sink for PHI.\n");
1769 if (
MI->isPosition() ||
TII->isBasicBlockPrologue(*
MI)) {
1770 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Use is BasicBlock prologue, "
1776 dbgs() <<
"AggressiveCycleSink: Use not in cycle, can't sink.\n");
1782 SinkItem MapEntry(&
I, SinkBlock);
1784 auto SI = SunkInstrs.
find(MapEntry);
1788 if (SI != SunkInstrs.
end()) {
1789 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Already sunk to block: "
1796 LLVM_DEBUG(
dbgs() <<
"AggressiveCycleSink: Sinking instruction to block: "
1799 NewMI =
I.getMF()->CloneMachineInstr(&
I);
1802 Register DestReg =
MRI->createVirtualRegister(TRC);
1808 SunkInstrs.
insert({MapEntry, NewMI});
1814 RegsToClearKillFlags.insert(MO.
getReg());
1829 I.eraseFromParent();
1835bool MachineSinking::SinkInstruction(
MachineInstr &
MI,
bool &SawStore,
1836 AllSuccsCache &AllSuccessors) {
1842 if (!
MI.isSafeToMove(SawStore))
1847 if (
MI.isConvergent())
1863 bool BreakPHIEdge =
false;
1866 FindSuccToSinkTo(
MI, ParentBlock, BreakPHIEdge, AllSuccessors);
1877 if (Reg == 0 || !
Reg.isPhysical())
1883 LLVM_DEBUG(
dbgs() <<
"Sink instr " <<
MI <<
"\tinto block " << *SuccToSinkTo);
1890 bool TryBreak =
false;
1892 MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo,
MI) :
true;
1893 if (!
MI.isSafeToMove(Store)) {
1894 LLVM_DEBUG(
dbgs() <<
" *** NOTE: Won't sink load along critical edge.\n");
1900 if (!TryBreak && !DT->
dominates(ParentBlock, SuccToSinkTo)) {
1906 if (!TryBreak && CI->getCycle(SuccToSinkTo) &&
1907 (!CI->getCycle(SuccToSinkTo)->isReducible() ||
1908 CI->getCycle(SuccToSinkTo)->getHeader() == SuccToSinkTo)) {
1920 bool Status = PostponeSplitCriticalEdge(
MI, ParentBlock, SuccToSinkTo,
1924 "break critical edge\n");
1935 PostponeSplitCriticalEdge(
MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
1938 "break critical edge\n");
1947 LLVM_DEBUG(
dbgs() <<
" *** Not sinking: prologue interference\n");
1953 for (
auto &MO :
MI.all_defs()) {
1956 auto It = SeenDbgUsers.find(MO.
getReg());
1957 if (It == SeenDbgUsers.end())
1961 auto &
Users = It->second;
1964 if (
User.getInt()) {
1979 if (
MI.getMF()->getFunction().getSubprogram() &&
MI.isCopy())
1980 SalvageUnsunkDebugUsersOfCopy(
MI, SuccToSinkTo);
1990 RegsToClearKillFlags.insert(MO.
getReg());
1995void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
2006 for (
auto &MO :
MI.all_defs()) {
2015 if (
User.getParent() ==
MI.getParent())
2019 "DBG_VALUE user of vreg, but has no operand for it?");
2026 for (
auto *
User : DbgDefUsers) {
2027 for (
auto &Reg : DbgUseRegs) {
2028 for (
auto &
DbgOp :
User->getDebugOperandsForReg(Reg)) {
2029 DbgOp.setReg(
MI.getOperand(1).getReg());
2030 DbgOp.setSubReg(
MI.getOperand(1).getSubReg());
2072class PostRAMachineSinkingImpl {
2097 StringRef getPassName()
const override {
return "PostRA Machine Sink"; }
2111char PostRAMachineSinkingLegacy::ID = 0;
2115 "PostRA Machine Sink",
false,
false)
2130 for (
auto *SI : SinkableBBs) {
2131 if (aliasWithRegsInLiveIn(*SI, Reg,
TRI)) {
2145 if (!SinkableBBs.
count(SI) && aliasWithRegsInLiveIn(*SI, Reg,
TRI))
2157 for (
auto DefReg : DefedRegsInCopy) {
2160 if (!BB || (SingleBB && SingleBB != BB))
2171 for (
auto U : UsedOpsInCopy) {
2177 if (UI.killsRegister(SrcReg,
TRI)) {
2178 UI.clearRegisterKills(SrcReg,
TRI);
2192 for (
Register DefReg : DefedRegsInCopy)
2195 for (
auto U : UsedOpsInCopy)
2205 bool HasRegDependency =
false;
2206 for (
unsigned i = 0, e =
MI->getNumOperands(); i != e; ++i) {
2215 HasRegDependency =
true;
2224 }
else if (MO.
isUse()) {
2226 HasRegDependency =
true;
2232 return HasRegDependency;
2244 if (!
SI->livein_empty() &&
SI->pred_size() == 1)
2247 if (SinkableBBs.
empty())
2250 bool Changed =
false;
2254 ModifiedRegUnits.clear();
2255 UsedRegUnits.clear();
2256 SeenDbgInstrs.clear();
2266 if (
MI.isDebugValue() && !
MI.isDebugRef()) {
2268 bool IsValid =
true;
2274 ModifiedRegUnits, UsedRegUnits)) {
2281 MIUnits[Unit].push_back(MO.
getReg());
2285 for (
auto &RegOps : MIUnits)
2286 SeenDbgInstrs[RegOps.first].emplace_back(&
MI,
2287 std::move(RegOps.second));
2292 if (
MI.isDebugOrPseudoInstr())
2299 if (!
MI.isCopy() || !
MI.getOperand(0).isRenamable()) {
2307 ModifiedRegUnits, UsedRegUnits)) {
2313 "Unexpect SrcReg or DefReg");
2324 "Unexpected predecessor");
2330 for (
auto &MO :
MI.all_defs()) {
2332 for (
const auto &
MIRegs : SeenDbgInstrs.lookup(Unit)) {
2333 auto &Regs = DbgValsToSinkMap[
MIRegs.first];
2338 auto DbgValsToSink = DbgValsToSinkMap.
takeVector();
2347 LLVM_DEBUG(
dbgs() <<
" *** Not sinking: prologue interference\n");
2358 ++NumPostRACopySink;
2364 bool Changed =
false;
2368 ModifiedRegUnits.init(*
TRI);
2369 UsedRegUnits.init(*
TRI);
2371 Changed |= tryToSinkCopy(BB, MF,
TRI,
TII);
2376bool PostRAMachineSinkingLegacy::runOnMachineFunction(
MachineFunction &MF) {
2380 return PostRAMachineSinkingImpl().run(MF);
2388 if (!PostRAMachineSinkingImpl().
run(MF))
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
BlockVerifier::State From
COFF::MachineTypes Machine
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static const HTTPClientCleanup Cleanup
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
iv Induction Variable Users
static cl::opt< unsigned > SinkLoadInstsPerBlockThreshold("machine-sink-load-instrs-threshold", cl::desc("Do not try to find alias store for a load if there is a in-path " "block whose instruction number is higher than this threshold."), cl::init(2000), cl::Hidden)
static cl::opt< unsigned > SinkIntoCycleLimit("machine-sink-cycle-limit", cl::desc("The maximum number of instructions considered for cycle sinking."), cl::init(50), cl::Hidden)
static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, const SmallVectorImpl< unsigned > &UsedOpsInCopy, const LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo, MachineBasicBlock::iterator InsertPos, ArrayRef< MIRegs > DbgValuesToSink)
Sink an instruction and its associated debug instructions.
static cl::opt< bool > SplitEdges("machine-sink-split", cl::desc("Split critical edges during machine sinking"), cl::init(true), cl::Hidden)
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI)
Return true if this machine instruction loads from global offset table or constant pool.
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
Return true if MI is likely to be usable as a memory operation by the implicit null check optimizatio...
static cl::opt< bool > SinkInstsIntoCycle("sink-insts-to-avoid-spills", cl::desc("Sink instructions into cycles to avoid " "register spills"), cl::init(false), cl::Hidden)
static cl::opt< unsigned > SinkLoadBlocksThreshold("machine-sink-load-blocks-threshold", cl::desc("Do not try to find alias store for a load if the block number in " "the straight line is higher than this threshold."), cl::init(20), cl::Hidden)
static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, const SmallVectorImpl< unsigned > &UsedOpsInCopy, const SmallVectorImpl< Register > &DefedRegsInCopy)
static bool hasRegisterDependency(MachineInstr *MI, SmallVectorImpl< unsigned > &UsedOpsInCopy, SmallVectorImpl< Register > &DefedRegsInCopy, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits)
Register const TargetRegisterInfo * TRI
std::pair< MachineInstr *, SmallVector< Register, 2 > > MIRegs
Machine code static false bool blockPrologueInterferes(const MachineBasicBlock *BB, MachineBasicBlock::const_iterator End, const MachineInstr &MI, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, const MachineRegisterInfo *MRI)
Return true if a target defined block prologue instruction interferes with a sink candidate.
static cl::opt< unsigned > SplitEdgeProbabilityThreshold("machine-sink-split-probability-threshold", cl::desc("Percentage threshold for splitting single-instruction critical edge. " "If the branch threshold is higher than this threshold, we allow " "speculative execution of up to 1 instruction to avoid branching to " "splitted critical edge"), cl::init(40), cl::Hidden)
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI, Register Reg)
If the sunk instruction is a copy, try to forward the copy instead of leaving an 'undef' DBG_VALUE in...
static cl::opt< bool > UseBlockFreqInfo("machine-sink-bfi", cl::desc("Use block frequency info to find successors to sink"), cl::init(true), cl::Hidden)
static MachineBasicBlock * getSingleLiveInSuccBB(MachineBasicBlock &CurBB, const SmallPtrSetImpl< MachineBasicBlock * > &SinkableBBs, Register Reg, const TargetRegisterInfo *TRI)
This file implements a map that provides insertion order iteration.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
This file defines the PointerIntPair class.
Remove Loads Into Fake Uses
This file implements a set that has insertion order iteration characteristics.
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
static bool SinkInstruction(Instruction *Inst, SmallPtrSetImpl< Instruction * > &Stores, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
SinkInstruction - Determine whether it is safe to sink the specified machine instruction out of its c...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Target-Independent Code Generator Pass Configuration Options pass.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
A private abstract base class describing the concept of an individual alias analysis implementation.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Represents analyses that only rely on functions' control flow.
static LLVM_ABI DebugLoc getMergedLocation(DebugLoc LocA, DebugLoc LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
Identifies a unique instance of a variable.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
Base class for the actual dominator tree node.
iterator_range< iterator > children()
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
A possibly irreducible generalization of a Loop.
BlockT * getHeader() const
bool isReducible() const
Whether the cycle is a natural loop.
BlockT * getCyclePreheader() const
Return the preheader block for this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
Module * getParent()
Get the module that this global value is contained inside of...
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool shouldSink(const MachineInstr &MI) const override
A set of register units used to track register liveness.
static void accumulateUsedDefed(const MachineInstr &MI, LiveRegUnits &ModifiedRegUnits, LiveRegUnits &UsedRegUnits, const TargetRegisterInfo *TRI)
For a machine instruction MI, adds all register units used in UsedRegUnits and defined or clobbered i...
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveIns(const MachineBasicBlock &MBB)
Adds registers living into block MBB.
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator instr_begin()
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator SkipPHIsAndLabels(iterator I)
Return the first instruction in MBB after I that is not a PHI or a label.
LLVM_ABI void removeLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Remove the specified register from the live in set.
unsigned succ_size() const
LLVM_ABI void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
pred_iterator pred_begin()
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
Legacy analysis pass which computes a MachineCycleInfo.
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformation or analysis.
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has no parent, prev, or next.
Representation of each machine instruction.
bool hasDebugOperandForReg(Register Reg) const
Returns whether this debug value has at least one debug operand with the register Reg.
void setDebugValueUndef()
Sets all register debug operands in this debug value instruction to be undef.
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isDebugInstr() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessary.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
Analysis pass that exposes the MachineLoopInfo for a machine function.
A description of a memory reference used in the backend.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-dominator tree for MachineFunctions.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
This class implements a map that also provides access to all stored values in a deterministic order.
VectorType takeVector()
Clear the MapVector and return the underlying vector.
An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR unit.
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at application-load time.
PointerIntPair - This class implements a pair of a pointer and small integer.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
Special value supplied for machine level alias analysis.
Track the current register pressure at some position in the instruction stream, and remember the high water mark.
List of registers defined and used by a machine instruction.
LLVM_ABI void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOperand flags.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small sizes.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element size.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
Target-Independent Code Generator Pass Configuration Options.
bool getEnableSinkAndFold() const
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
LLVM_ABI void initializeMachineSinkingLegacyPass(PassRegistry &)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isCycleInvariant(const MachineCycle *Cycle, MachineInstr &I)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI char & PostRAMachineSinkingID
This pass perform post-ra machine sink for COPY instructions.
LLVM_ABI char & MachineSinkingLegacyID
MachineSinking - This pass performs sinking on machine instructions.
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
TODO: Might pack better if we changed this to a Struct of Arrays, since MachineOperand is width 32,...
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
static StringRef name()
Gets the name of the pass we are mixed into.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
Represents a predicate at the MachineFunction level.
A pair composed of a register and a sub-register index.