#define DEBUG_TYPE "hexagon-pei"

static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
    cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2));

static cl::opt<int> SpillFuncThreshold("spill-func-threshold", cl::Hidden,
    cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6));

static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden,
    cl::desc("Specify Os spill func threshold"), cl::init(1));

static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
    cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"));

static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit",
    cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden,
    cl::desc("Max count of stack frame shrink-wraps"));

static cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long",
    cl::Hidden, cl::desc("Enable long calls for save-restore stubs."),
    cl::init(false));

static cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden,
    cl::init(std::numeric_limits<unsigned>::max()));
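// These knobs control spill-slot scavenging, the O2 and Os thresholds beyond
// which the out-of-line save/restore routines are called, prologue/epilogue
// shrink-wrapping and its debug limit, long calls to the save/restore stubs,
// and the cap on spill-slot optimization.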
char HexagonCallFrameInformation::ID = 0;

bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
  HFI.insertCFIInstructions(MF);

                "Hexagon call frame information", false, false)

  return new HexagonCallFrameInformation();

  if (Reg < Hexagon::D0 || Reg > Hexagon::D15)

  if (!RegNo || SubReg < RegNo)

  static_assert(Hexagon::R1 > 0,
                "Assume physical registers are encoded as positive integers");

  for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
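// getMax32BitSubRegister maps a double register to the 32-bit subregister with
// the greater encoding, and getMaxCalleeSavedReg walks the CalleeSavedInfo
// vector for the callee-saved register with the largest such id.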
  unsigned Opc = MI.getOpcode();
  case Hexagon::PS_alloca:
  case Hexagon::PS_aligna:

  for (MCPhysReg S : HRI.subregs_inclusive(R))

  if (MO.isRegMask()) {
    const uint32_t *BM = MO.getRegMask();
    if (!(BM[R/32] & (1u << (R%32))))

  unsigned RetOpc = I->getOpcode();
  return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;

  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
  case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:

  return F.hasOptSize() && !F.hasMinSize();

  static unsigned ShrinkCounter = 0;
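// ShrinkCounter tracks how many frames have been shrink-wrapped so far, so the
// shrink-frame-limit option can cap the number of shrink-wraps performed.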
  RPO[I->getNumber()] = RPON++;

  unsigned BN = RPO[I.getNumber()];
  if (RPO[Succ->getNumber()] <= BN)

  for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)

  dbgs() << "Blocks needing SF: {";
  for (auto &B : SFBlocks)

  if (SFBlocks.empty())

  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {

  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {

  dbgs() << "Computed dom block: ";
  dbgs() << ", computed pdom block: ";

  LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
  findShrunkPrologEpilog(MF, PrologB, EpilogB);

  bool PrologueStubs = false;
  insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
  insertPrologueInBlock(*PrologB, PrologueStubs);
  updateEntryPaths(MF, *PrologB);

  insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
  insertEpilogueInBlock(*EpilogB);

  if (B.isReturnBlock())
    insertCSRRestoresInBlock(B, CSI, HRI);

  if (B.isReturnBlock())
    insertEpilogueInBlock(B);

  BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
  updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);

  assert(F.hasFnAttribute(Attribute::NoReturn) &&
         F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
         !F.getFunction().hasFnAttribute(Attribute::UWTable));

  assert(!MFI.hasVarSizedObjects() &&
         !HST.getRegisterInfo()->hasStackRealignment(MF));
  return F.hasFnAttribute(Attribute::NoReturn) &&
         F.hasFnAttribute(Attribute::NoUnwind) &&
         !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
         MFI.getStackSize() == 0;
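// Callee-saved spills may be skipped only for noreturn, nounwind functions
// without a UWTable attribute, and only when the subtarget permits eliminating
// the stack frame for noreturn functions and the frame is otherwise empty.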
    bool PrologueStubs) const {
  auto &HRI = *HST.getRegisterInfo();

  FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);

  if (MI.getOpcode() == Hexagon::PS_alloca)

  for (auto *MI : AdjustRegs) {
    assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
    expandAlloca(MI, HII, SP, MaxCF);
    MI->eraseFromParent();
  if (MF.getFunction().isVarArg() &&

  int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0)
                                             ? NumVarArgRegs * 4
                                             : NumVarArgRegs * 4 + 4;

  if (RegisterSavedAreaSizePlusPadding > 0) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addImm(-RegisterSavedAreaSizePlusPadding)

    for (int i = HMFI.getFirstNamedArgFrameIndex(),
             e = HMFI.getLastNamedArgFrameIndex(); i >= e; --i) {
      unsigned LDOpc, STOpc;

      if (ObjAlign > ObjSize) {
      else if (ObjSize <= 2)
      else if (ObjSize <= 4)
      else if (ObjSize > 4)

      switch (OpcodeChecker) {
        LDOpc = Hexagon::L2_loadrb_io;
        STOpc = Hexagon::S2_storerb_io;
        LDOpc = Hexagon::L2_loadrh_io;
        STOpc = Hexagon::S2_storerh_io;
        LDOpc = Hexagon::L2_loadri_io;
        STOpc = Hexagon::S2_storeri_io;
        LDOpc = Hexagon::L2_loadrd_io;
        STOpc = Hexagon::S2_storerd_io;

      Register RegUsed = LDOpc == Hexagon::L2_loadrd_io ? Hexagon::D3

      int LoadStoreCount = ObjSize / OpcodeChecker;
      if (ObjSize % OpcodeChecker)

      NumBytes = alignTo(NumBytes, ObjAlign);

      while (Count < LoadStoreCount) {
        BuildMI(MBB, InsertPt, dl, HII.get(LDOpc), RegUsed)
            .addImm(RegisterSavedAreaSizePlusPadding +
                    ObjAlign.value() * Count + NumBytes)

    NumBytes = alignTo(NumBytes, 8);
    NumBytes = (NumVarArgRegs % 2 == 0) ? NumBytes : NumBytes + 4;
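// For variadic functions the prologue reserves a register-saved area for the
// unnamed argument registers (padded to an 8-byte multiple) and then copies
// the named arguments' incoming stack objects with the load/store opcode pair
// chosen above, sized and aligned per object.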
        BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_storeri_io))

    insertAllocframe(MBB, InsertPt, NumBytes);

    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)

    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
  } else if (NumBytes > 0) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

  auto &HRI = *HST.getRegisterInfo();

  int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
      (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);
  NumBytes += RegisterSavedAreaSizePlusPadding;

  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

  unsigned RetOpc = RetI ? RetI->getOpcode() : 0;

  if (RetOpc == Hexagon::EH_RETURN_JMPR) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
  if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {

  bool NeedsDeallocframe = true;

  unsigned COpc = PrevIt->getOpcode();
  if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
      COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
      COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
      COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
      COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
    NeedsDeallocframe = false;

  if (!NeedsDeallocframe)

  BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))

  unsigned NewOpc = Hexagon::L4_return;

  int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
      (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);

      (I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT &&
       I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC &&
       I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 &&
       I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC))
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))

  if (RegisterSavedAreaSizePlusPadding != 0)
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addImm(RegisterSavedAreaSizePlusPadding);
  auto &HRI = *HST.getRegisterInfo();

  const unsigned int ALLOCFRAME_MAX = 16384;

  if (NumBytes >= ALLOCFRAME_MAX) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))

    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
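// S2_allocframe cannot encode frame sizes of ALLOCFRAME_MAX (16384) bytes or
// more, so for larger frames an allocframe is emitted and the stack pointer is
// then adjusted with an explicit add of the remaining (negative) frame size.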
  for (unsigned i = 0; i < Worklist.size(); ++i) {
    unsigned BN = Worklist[i];

    Worklist.insert(SB->getNumber());

  if (Path[BN] || DoneF[BN])

  bool ReachedExit = false;
  ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

  if (ReachedExit && &MBB != &RestoreB) {
static std::optional<MachineBasicBlock::iterator>
findCFILocation(MachineBasicBlock &B) {
  auto End = B.instr_end();

  if (I.getOpcode() == Hexagon::S2_allocframe)
    return std::next(It);

  bool HasCall = false, HasAllocFrame = false;
  while (++T != End && T->isBundled()) {
    if (T->getOpcode() == Hexagon::S2_allocframe)
      HasAllocFrame = true;
    else if (T->isCall())

  return HasCall ? It : std::next(It);

  return std::nullopt;
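// In effect, findCFILocation picks where the CFI instructions should go:
// right after a top-level allocframe, or at the bundle boundary when the
// allocframe is bundled (keeping the CFI ahead of a bundled call);
// std::nullopt means the block has no suitable location.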
  insertCFIInstructionsAt(B, *At);

  auto &HRI = *HST.getRegisterInfo();
  const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);

  bool HasFP = hasFP(MF);

  unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
  unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);

    Hexagon::R1,  Hexagon::R0,  Hexagon::R3,  Hexagon::R2,
    Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
    Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
    Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
    Hexagon::D0,  Hexagon::D1,  Hexagon::D8,  Hexagon::D9,
    Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13

    return C.getReg() == Reg;

  if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
    unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);

    Register HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
    Register LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
    unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
    unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);

  bool HasExtraAlign = HRI.hasStackRealignment(MF);

  if (HasAlloca || HasExtraAlign)

    bool Stkchk = false) {
  const char *V4SpillToMemoryFunctions[] = {
      "__save_r16_through_r17",
      "__save_r16_through_r19",
      "__save_r16_through_r21",
      "__save_r16_through_r23",
      "__save_r16_through_r25",
      "__save_r16_through_r27"};

  const char *V4SpillToMemoryStkchkFunctions[] = {
      "__save_r16_through_r17_stkchk",
      "__save_r16_through_r19_stkchk",
      "__save_r16_through_r21_stkchk",
      "__save_r16_through_r23_stkchk",
      "__save_r16_through_r25_stkchk",
      "__save_r16_through_r27_stkchk"};

  const char *V4SpillFromMemoryFunctions[] = {
      "__restore_r16_through_r17_and_deallocframe",
      "__restore_r16_through_r19_and_deallocframe",
      "__restore_r16_through_r21_and_deallocframe",
      "__restore_r16_through_r23_and_deallocframe",
      "__restore_r16_through_r25_and_deallocframe",
      "__restore_r16_through_r27_and_deallocframe"};

  const char *V4SpillFromMemoryTailcallFunctions[] = {
      "__restore_r16_through_r17_and_deallocframe_before_tailcall",
      "__restore_r16_through_r19_and_deallocframe_before_tailcall",
      "__restore_r16_through_r21_and_deallocframe_before_tailcall",
      "__restore_r16_through_r23_and_deallocframe_before_tailcall",
      "__restore_r16_through_r25_and_deallocframe_before_tailcall",
      "__restore_r16_through_r27_and_deallocframe_before_tailcall"};

  const char **SpillFunc = nullptr;

  SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
                     : V4SpillToMemoryFunctions;

  SpillFunc = V4SpillFromMemoryFunctions;

  SpillFunc = V4SpillFromMemoryTailcallFunctions;

  assert(SpillFunc && "Unknown spill kind");

    return SpillFunc[0];
    return SpillFunc[1];
    return SpillFunc[2];
    return SpillFunc[3];
    return SpillFunc[4];
    return SpillFunc[5];
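// Each out-of-line save/restore routine handles a contiguous register range,
// from r16..r17 up to r16..r27; the table index is selected from the highest
// callee-saved register actually spilled, and the _stkchk variants also
// perform the runtime stack-overflow check.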
  bool HasExtraAlign = HRI.hasStackRealignment(MF);

  Register SP = HRI.getStackRegister();

  Register AP = HMFI.getStackAlignBaseReg();

  bool UseFP = false, UseAP = false;

  if (NoOpt && !HasExtraAlign)

  UseFP |= (HasAlloca || HasExtraAlign);

  bool HasFP = hasFP(MF);
  assert((HasFP || !UseFP) && "This function must have frame pointer");

  if (Offset > 0 && !HasFP)

  if (!UseFP && !UseAP)
    RealOffset = FrameSize + Offset;

    bool &PrologueStubs) const {

  PrologueStubs = false;

  if (useSpillFunction(MF, CSI)) {
    PrologueStubs = true;

    if (StkOvrFlowEnabled) {
      SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
                       : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
      SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
                       : Hexagon::SAVE_REGISTERS_CALL_V4STK;
      SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
                       : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
      SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
                       : Hexagon::SAVE_REGISTERS_CALL_V4;

    addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);

    int FI = I.getFrameIdx();
    HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI, Register());
  if (useRestoreFunction(MF, CSI)) {
    bool IsPIC = HTM.isPositionIndependent();

      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
                     : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
                     : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;

      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
                     : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
                     : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;

    addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);

    int FI = I.getFrameIdx();
    HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI, Register());

  unsigned Opc = MI.getOpcode();
  assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
         "Cannot handle this call frame pseudo instruction");
  if (!HasAlloca || !NeedsAlign)

  AP = AI->getOperand(0).getReg();
  HMFI.setStackAlignBaseReg(AP);

  auto IsUsed = [&HRI, &MRI] (Register Reg) -> bool {
    if (MRI.isPhysRegUsed(*AI))

  BitVector SRegs(Hexagon::NUM_TARGET_REGS);

  bool HasResSub = false;

  BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
  for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {

  int64_t MinOffset = 0;
  for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
    MinOffset = std::min(MinOffset, S->Offset);
    SRegs[S->Reg] = false;

    unsigned Size = TRI->getSpillSize(*RC);
    int64_t Off = MinOffset - Size;
    Off &= -Alignment.value();
    MinOffset = std::min(MinOffset, Off);

  dbgs() << "CS information: {";
    int FI = I.getFrameIdx();

  bool MissedReg = false;

  if (!Hexagon::ModRegsRegClass.contains(DstR) ||
      !Hexagon::ModRegsRegClass.contains(SrcR))

  Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));
  BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
  if (!MI->getOperand(0).isFI())

  unsigned Opc = MI->getOpcode();

  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
                                                 : Hexagon::A2_tfrcrr;

  BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))

  if (!MI->getOperand(1).isFI())

  unsigned Opc = MI->getOpcode();

  int FI = MI->getOperand(1).getIndex();

  Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)

  unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
                                                 : Hexagon::A2_tfrrcr;

  if (!MI->getOperand(0).isFI())

  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)

  expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);

  if (!MI->getOperand(1).isFI())

  int FI = MI->getOperand(1).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)

  expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);

  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
  if (!MI->getOperand(0).isFI())

  for (auto R = B.begin(); R != It; ++R) {
    LPR.stepForward(*R, Clobbers);

  Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
  Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

  if (LPR.contains(SrcLo)) {
    StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                     : Hexagon::V6_vS32Ub_ai;
  if (LPR.contains(SrcHi)) {
    StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                     : Hexagon::V6_vS32Ub_ai;

  if (!MI->getOperand(1).isFI())

  Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
  Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
  int FI = MI->getOperand(1).getIndex();

  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

  LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                  : Hexagon::V6_vL32Ub_ai;
  LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                  : Hexagon::V6_vL32Ub_ai;

  if (!MI->getOperand(0).isFI())

  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
  unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                            : Hexagon::V6_vS32Ub_ai;

  if (!MI->getOperand(1).isFI())

  int FI = MI->getOperand(1).getIndex();

  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
  unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                           : Hexagon::V6_vL32Ub_ai;
  bool Changed = false;

  for (auto &B : MF) {
    for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
      NextI = std::next(I);
      unsigned Opc = MI->getOpcode();

        case TargetOpcode::COPY:
          Changed |= expandCopy(B, I, MRI, HII, NewRegs);
        case Hexagon::STriw_pred:
        case Hexagon::STriw_ctr:
          Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
        case Hexagon::LDriw_pred:
        case Hexagon::LDriw_ctr:
          Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vstorerq_ai:
          Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vloadrq_ai:
          Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vloadrw_ai:
          Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vstorerw_ai:
          Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
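// expandSpillMacros rewrites the spill/reload pseudo-instructions (predicate
// and control-register transfers, HVX predicate and vector-pair loads/stores)
// into real instructions, recording any virtual registers it creates in
// NewRegs.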
  SavedRegs.resize(HRI.getNumRegs());

  expandSpillMacros(MF, NewRegs);
  optimizeSpillSlots(MF, NewRegs);

  if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
    SpillRCs.insert(&Hexagon::IntRegsRegClass);

    for (const auto *RC : SpillRCs) {
      switch (RC->getID()) {
        case Hexagon::IntRegsRegClassID:
        case Hexagon::HvxQRRegClassID:

      unsigned S = HRI.getSpillSize(*RC);
      Align A = HRI.getSpillAlign(*RC);
      for (unsigned i = 0; i < Num; i++) {

  auto F = DeadMap.find({Reg,0});
  if (F == DeadMap.end())
  for (auto &DR : F->second)
    if (DR.contains(FIR))
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  using BlockIndexMap =
      std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
  using BlockRangeMap =
      std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;

    SlotInfo() = default;

  BlockIndexMap BlockIndexes;
  std::map<int,SlotInfo> FIRangeMap;

    if (HaveRC == nullptr || HaveRC == NewRC)

    if (NewRC->hasSubClassEq(HaveRC))

  for (auto &B : MF) {
    std::map<int,IndexType> LastStore, LastLoad;
    auto &IndexMap = P.first->second;
                      << IndexMap << '\n');

    for (auto &In : B) {
      bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
      bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
      if (Load && Store) {

      if (Load || Store) {
        int TFI = Load ? LFI : SFI;
        unsigned AM = HII.getAddrMode(In);
        SlotInfo &SI = FIRangeMap[TFI];

        unsigned OpNum = Load ? 0 : 2;
        auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
        RC = getCommonRC(SI.RC, RC);

        unsigned S = HII.getMemAccessSize(In);
        if (SI.Size != 0 && SI.Size != S)

      for (auto *Mo : In.memoperands()) {
        if (!Mo->isVolatile() && !Mo->isAtomic())

      for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
        int FI = Op.getIndex();
        if (i+1 >= n || !In.getOperand(i+1).isImm() ||
            In.getOperand(i+1).getImm() != 0)
        if (BadFIs.count(FI))

        auto &LS = LastStore[FI];
        auto &LL = LastLoad[FI];
        if (LS == IndexType::None)
          LS = IndexType::Entry;
        if (LS != IndexType::None)
          RL.add(LS, LL, false, false);
        else if (LL != IndexType::None)
          RL.add(IndexType::Entry, LL, false, false);
        LL = IndexType::None;

    for (auto &I : LastLoad) {
      IndexType LL = I.second;
      if (LL == IndexType::None)
      auto &RL = FIRangeMap[I.first].Map[&B];
      IndexType &LS = LastStore[I.first];
      if (LS != IndexType::None)
        RL.add(LS, LL, false, false);
        RL.add(IndexType::Entry, LL, false, false);
      LS = IndexType::None;

    for (auto &I : LastStore) {
      IndexType LS = I.second;
      if (LS == IndexType::None)
      auto &RL = FIRangeMap[I.first].Map[&B];
      RL.add(LS, IndexType::None, false, false);
    for (auto &P : FIRangeMap) {
      dbgs() << "fi#" << P.first;
      if (BadFIs.count(P.first))
      if (P.second.RC != nullptr)
        dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
      else
        dbgs() << "<null>\n";
      for (auto &R : P.second.Map)

  std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;

  for (auto &P : FIRangeMap) {
    if (BadFIs.count(P.first))
    for (auto &B : MF) {
      auto F = P.second.Map.find(&B);
      if (F == P.second.Map.end() || F->second.empty())
      if (IR.start() == IndexType::Entry)
        LoxFIs.insert(P.first);
      BlockFIMap[&B].push_back(P.first);

    dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
    for (auto &P : BlockFIMap) {
      auto &FIs = P.second;
      for (auto I : FIs) {
        dbgs() << " fi#" << I;
        if (LoxFIs.count(I))

  for (auto &B : MF) {
    auto F = BlockIndexes.find(&B);
    assert(F != BlockIndexes.end());

    for (auto FI : BlockFIMap[&B]) {
      if (BadFIs.count(FI))

      for (auto &Range : RL) {
        if (!IndexType::isInstr(Range.start()) ||
            !IndexType::isInstr(Range.end()))

        assert(SI.mayStore() && "Unexpected start instruction");
                                                  SrcOp.getSubReg() };
        auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);

        if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
          CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)

        if (LoxFIs.count(FI) && (&Range == &RL.back())) {
          if (unsigned SR = SrcOp.getSubReg())
            SrcOp.setReg(HRI.getSubReg(FoundR, SR));
          else
            SrcOp.setReg(FoundR);
          SrcOp.setIsKill(false);

        for (auto It = StartIt; It != EndIt; It = NextIt) {
          NextIt = std::next(It);
          if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
          assert(MI.getOperand(0).getSubReg() == 0);

          if (DstR != FoundR) {
            unsigned MemSize = HII.getMemAccessSize(MI);
            unsigned CopyOpc = TargetOpcode::COPY;
            if (HII.isSignExtendingLoad(MI))
              CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
            else if (HII.isZeroExtendingLoad(MI))
              CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
            CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
void HexagonFrameLowering::expandAlloca(MachineInstr *AI,

    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
    BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)

  if (I.getOpcode() == Hexagon::PS_aligna)

void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
      const CSIVect &CSI, bool IsDef, bool IsKill) const {

    const CSIVect &CSI) const {

  BitVector Regs(Hexagon::NUM_TARGET_REGS);

    if (!Hexagon::DoubleRegsRegClass.contains(R))

  int F = Regs.find_first();
  if (F != Hexagon::D8)

  int N = Regs.find_next(F);
  if (N >= 0 && N != F+1)

    const CSIVect &CSI) const {
  if (shouldInlineCSR(MF, CSI))
  unsigned NumCSI = CSI.size();
  return Threshold < NumCSI;

bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
      const CSIVect &CSI) const {
  if (shouldInlineCSR(MF, CSI))
  unsigned NumCSI = CSI.size();
  return Threshold < NumCSI;

bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
  if (HST.useHVXOps() && StackSize > 256)

  bool HasImmStack = false;
  unsigned MinLS = ~0u;

    switch (MI.getOpcode()) {
      case Hexagon::S4_storeirit_io:
      case Hexagon::S4_storeirif_io:
      case Hexagon::S4_storeiri_io:
      case Hexagon::S4_storeirht_io:
      case Hexagon::S4_storeirhf_io:
      case Hexagon::S4_storeirh_io:
      case Hexagon::S4_storeirbt_io:
      case Hexagon::S4_storeirbf_io:
      case Hexagon::S4_storeirb_io:
        if (MI.getOperand(0).isFI())
          MinLS = std::min(MinLS, LS);

  return !isUInt<6>(StackSize >> MinLS);
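// mayOverflowFrameOffset is a conservative check for frame offsets that might
// not fit the available addressing modes: large stacks when HVX is in use, or
// store-immediate instructions to stack slots whose scaled 6-bit offset field
// cannot reach the estimated stack size.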
struct HexagonFrameSortingObject {
  bool IsValid = false;

struct HexagonFrameSortingComparator {
  inline bool operator()(const HexagonFrameSortingObject &A,
                         const HexagonFrameSortingObject &B) const {
    return std::make_tuple(!A.IsValid, A.ObjectAlignment, A.Size) <
           std::make_tuple(!B.IsValid, B.ObjectAlignment, B.Size);

  if (ObjectsToAllocate.empty())

  int NObjects = ObjectsToAllocate.size();

    if (i != ObjectsToAllocate[j])

    SortingObjects[i].IsValid = true;
    SortingObjects[i].Index = i;

  for (auto &Obj : SortingObjects) {
    ObjectsToAllocate[--i] = Obj.Index;
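// orderFrameObjects sorts the allocatable stack objects by alignment and size
// (invalid entries last) and rewrites ObjectsToAllocate so the most-aligned,
// largest objects are laid out first, minimizing padding.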
static MachineInstr * getReturn(MachineBasicBlock &MBB)
Returns the "return" instruction from this block, or nullptr if there isn't any.
static cl::opt< unsigned > ShrinkLimit("shrink-frame-limit", cl::init(std::numeric_limits< unsigned >::max()), cl::Hidden, cl::desc("Max count of stack frame shrink-wraps"))
static bool isOptNone(const MachineFunction &MF)
static cl::opt< int > SpillFuncThreshold("spill-func-threshold", cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6))
static std::optional< MachineBasicBlock::iterator > findCFILocation(MachineBasicBlock &B)
static cl::opt< bool > EliminateFramePointer("hexagon-fp-elim", cl::init(true), cl::Hidden, cl::desc("Refrain from using FP whenever possible"))
static bool enableAllocFrameElim(const MachineFunction &MF)
static const char * getSpillFunctionFor(Register MaxReg, SpillKind SpillType, bool Stkchk=false)
static bool hasReturn(const MachineBasicBlock &MBB)
Returns true if MBB contains an instruction that returns.
static cl::opt< bool > EnableSaveRestoreLong("enable-save-restore-long", cl::Hidden, cl::desc("Enable long calls for save-restore stubs."), cl::init(false))
static bool needToReserveScavengingSpillSlots(MachineFunction &MF, const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC)
Returns true if there are no caller-saved registers available in class RC.
static bool isOptSize(const MachineFunction &MF)
static Register getMax32BitSubRegister(Register Reg, const TargetRegisterInfo &TRI, bool hireg=true)
Map a register pair Reg to the subregister that has the greater "number", i.e.
static cl::opt< int > SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden, cl::desc("Specify Os spill func threshold"), cl::init(1))
static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, const HexagonRegisterInfo &HRI)
Checks if the basic block contains any instruction that needs a stack frame to be already in place.
static cl::opt< bool > DisableDeallocRet("disable-hexagon-dealloc-ret", cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"))
static cl::opt< bool > EnableShrinkWrapping("hexagon-shrink-frame", cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"))
static bool hasTailCall(const MachineBasicBlock &MBB)
Returns true if MBB has a machine instructions that indicates a tail call in the block.
static cl::opt< unsigned > NumberScavengerSlots("number-scavenger-slots", cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2))
static Register getMaxCalleeSavedReg(ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo &TRI)
Returns the callee saved register with the largest id in the vector.
static bool isMinSize(const MachineFunction &MF)
static cl::opt< unsigned > SpillOptMax("spill-opt-max", cl::Hidden, cl::init(std::numeric_limits< unsigned >::max()))
static unsigned SpillOptCount
static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI)
static bool isRestoreCall(unsigned Opc)
static cl::opt< bool > OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden, cl::init(true), cl::desc("Optimize spill slots"))
static cl::opt< bool > EnableStackOVFSanitizer("enable-stackovf-sanitizer", cl::Hidden, cl::desc("Enable runtime checks for stack overflow."), cl::init(false))