#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe,
          "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");
STATISTIC(NumFunctionUsingPush2Pop2,
          "Number of functions using push2/pop2");
// canSimplifyCallFramePseudos (fragment)
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);

  return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri; // getSUBriOpcode
  return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri; // getADDriOpcode
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;   // getSUBrrOpcode
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;   // getADDrrOpcode
  return IsLP64 ? X86::AND64ri32 : X86::AND32ri; // getANDriOpcode
  return IsLP64 ? X86::LEA64r : X86::LEA32r;     // getLEArOpcode
  return X86::MOV32ri64;                         // getMOVriOpcode
  return X86::MOV64ri32;
// getPUSHOpcode / getPOPOpcode / getPUSH2Opcode / getPOP2Opcode (fragments)
  return ST.is64Bit() ? (ST.hasPPX() ? X86::PUSHP64r : X86::PUSH64r)
  return ST.is64Bit() ? (ST.hasPPX() ? X86::POPP64r : X86::POP64r)
  return ST.hasPPX() ? X86::PUSH2P : X86::PUSH2;
  return ST.hasPPX() ? X86::POP2P : X86::POP2;

// isEAXLiveIn (fragment)
  if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
      Reg == X86::AH || Reg == X86::AL)
// flagsNeedToBePreservedBeforeTheTerminators (fragments)
  bool BreakNext = false;
    if (Reg != X86::EFLAGS)
    if (Succ->isLiveIn(X86::EFLAGS))
// emitSPUpdate (fragments)
                                    bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
  if (EmitInlineStackProbe && !InEpilogue) {
  } else if (Offset > Chunk) {
      MI->getOperand(3).setIsDead();
  } else if (Offset > 8 * Chunk) {
      MI->getOperand(3).setIsDead();
    unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                         : (Is64Bit ? X86::POP64r : X86::POP32r);
    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
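// Illustration (not LLVM code): emitSPUpdate above splits one large stack
// adjustment into chunk-sized pieces so every emitted SUB/ADD immediate stays
// within its encodable range. A minimal standalone sketch of that chunking;
// the emitAdjust callback is a hypothetical stand-in for BuildStackAdjustment.
#include <cstdint>
#include <functional>

static void emitSPUpdateChunks(int64_t NumBytes, uint64_t Chunk,
                               const std::function<void(int64_t)> &emitAdjust) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  while (Offset) {
    uint64_t ThisVal = Offset > Chunk ? Chunk : Offset; // one SUB/ADD per chunk
    emitAdjust(isSub ? -(int64_t)ThisVal : (int64_t)ThisVal);
    Offset -= ThisVal;
  }
}

int main() {
  // e.g. a 5 GiB allocation with a ~2 GiB chunk limit becomes three updates.
  int Count = 0;
  emitSPUpdateChunks(-5LL * (1 << 30), (1LL << 31) - 1, [&](int64_t) { ++Count; });
  return Count == 3 ? 0 : 1;
}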
// BuildStackAdjustment (fragments)
  assert(Offset != 0 && "zero offset stack adjustment requested");
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
    if (UseLEA && !STI.useLeaForSP())
             "We shouldn't have allowed this insertion point");
  MachineInstrBuilder MI;
    MI->getOperand(3).setIsDead();
// mergeSPUpdates (fragments)
template <typename FoundT, typename CalcT>
                                         FoundT FoundStackAdjust,
                                         bool doMergeWithPrevious) const {
    return CalcNewOffset(0);
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD32ri) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB32ri) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset = -PI->getOperand(2).getImm();
    return CalcNewOffset(0);
  FoundStackAdjust(PI, Offset);
  if (doMergeWithPrevious ? (PI == MBB.begin()) : (PI == MBB.end()))
    return CalcNewOffset(0);
  PI = doMergeWithPrevious ? std::prev(PI) : std::next(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (!doMergeWithPrevious)
  return CalcNewOffset(Offset);
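// Illustration (not LLVM code): mergeSPUpdates folds a neighbouring ADD/SUB/LEA
// on the stack pointer into the adjustment being built and then erases it. A
// standalone sketch of that folding over a toy instruction list; SPInstr and
// mergeWithPrevious are hypothetical names.
#include <cstdint>
#include <vector>

struct SPInstr { bool IsAddToSP; int64_t Imm; };

static int64_t mergeWithPrevious(std::vector<SPInstr> &MBB, int64_t NewOffset) {
  if (MBB.empty())
    return NewOffset;
  const SPInstr &Prev = MBB.back();
  NewOffset += Prev.IsAddToSP ? Prev.Imm : -Prev.Imm; // fold prior adjustment
  MBB.pop_back();                                     // ...and erase it
  return NewOffset;
}

int main() {
  std::vector<SPInstr> MBB = {{/*IsAddToSP=*/false, 32}}; // "sub rsp, 32"
  return mergeWithPrevious(MBB, 64) == 32 ? 0 : 1;        // 64 + (-32)
}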
// mergeSPAdd (fragments)
                                        bool doMergeWithPrevious) const {
  return mergeSPUpdates(
                        doMergeWithPrevious);

// emitCalleeSavedFrameMoves and related CFI helpers (fragments)
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
  CfaExpr.push_back(dwarf::DW_CFA_expression);
      STI.isTarget64BitILP32()
  unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
  int FI = MI->getOperand(1).getIndex();
      STI.isTarget64BitILP32()
  unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
    if (!X86::RFP80RegClass.contains(Reg))
  unsigned NumFPRegs = ST.is64Bit() ? 8 : 7;
    for (unsigned i = 0; i != NumFPRegs; ++i)
    for (unsigned i = 0; i != NumFPRegs; ++i)
    if (TRI->isGeneralPurposeRegister(MF, Reg)) {
  for (MCRegister Reg : GPRsToZero.set_bits())
// emitStackProbe / inlineStackProbe / emitStackProbeInline (fragments)
    std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
  if (STI.isTargetWindowsCoreCLR()) {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog, InstrNum);

  return STI.isOSWindows() && !STI.isTargetWin64();

    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  if (Where != PrologMBB.end()) {
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();

                                          bool InProlog) const {
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
void X86FrameLowering::emitStackProbeInlineGeneric(
         "different expansion expected for CoreCLR 64 bit");
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;
      TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;
  if (Offset > ProbeChunk) {
                                    MaxAlign % StackProbeSize);
                                    MaxAlign % StackProbeSize);
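// Illustration (not LLVM code): the generic inline probe above uses a
// straight-line ("block") expansion when the allocation fits in roughly eight
// probe pages and a runtime loop otherwise; either way the stack is touched
// once per StackProbeSize bytes. Standalone sketch of that decision;
// probePage() is a hypothetical stand-in for the emitted store to the stack.
#include <cstdint>

static unsigned emitInlineProbes(uint64_t Offset, uint64_t StackProbeSize,
                                 void (*probePage)(uint64_t)) {
  const uint64_t ProbeChunk = StackProbeSize * 8;
  const bool UseLoop = Offset > ProbeChunk; // block form if false, loop if true
  (void)UseLoop;
  unsigned NumProbes = 0;
  for (uint64_t Done = StackProbeSize; Done <= Offset; Done += StackProbeSize) {
    probePage(Done); // touch one guard page below the old stack pointer
    ++NumProbes;
  }
  return NumProbes;
}

int main() {
  return emitInlineProbes(0x3000, 0x1000, +[](uint64_t) {}) == 3 ? 0 : 1;
}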
void X86FrameLowering::emitStackProbeInlineGenericBlock(
    uint64_t AlignOffset) const {
  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t CurrentOffset = 0;
  assert(AlignOffset < StackProbeSize);
  if (StackProbeSize < Offset + AlignOffset) {
    BuildStackAdjustment(MBB, MBBI, DL, -StackAdjustment, false)
    if (!HasFP && NeedsDwarfCFI) {
    NumFrameExtraProbe++;
    CurrentOffset = StackProbeSize - AlignOffset;
  while (CurrentOffset + StackProbeSize < Offset) {
    BuildStackAdjustment(MBB, MBBI, DL, -StackProbeSize, false)
    if (!HasFP && NeedsDwarfCFI) {
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
  uint64_t ChunkSize = Offset - CurrentOffset;
    unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
  BuildStackAdjustment(MBB, MBBI, DL, -ChunkSize, false)
void X86FrameLowering::emitStackProbeInlineGenericLoop(
    uint64_t AlignOffset) const {
         "Inline stack probe loop will clobber live EFLAGS.");
  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  if (AlignOffset < StackProbeSize) {
    BuildStackAdjustment(MBB, MBBI, DL, -AlignOffset, false)
    NumFrameExtraProbe++;
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);
  if (!HasFP && NeedsDwarfCFI) {
    const Register DwarfFinalStackProbed =
        STI.isTarget64BitILP32()
        nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));
  BuildStackAdjustment(*testMBB, testMBB->end(), DL, -StackProbeSize,
  const uint64_t TailOffset = Offset % StackProbeSize;
  BuildStackAdjustment(*tailMBB, TailMBBIter, DL, -TailOffset,
  if (!HasFP && NeedsDwarfCFI) {
        STI.isTarget64BitILP32()
        nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));
void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
         "Inline stack probe loop will clobber live EFLAGS.");
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageMask = ~(PageSize - 1);
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
      SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),
      ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
      RCXShadowSlot = InitSlot;
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      .addImm(ThreadEnvironmentStackLimit)
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
                   TII.get(X86::MOV64rm), X86::RCX),
               X86::RSP, false, RCXShadowSlot);
                   TII.get(X86::MOV64rm), X86::RDX),
               X86::RSP, false, RDXShadowSlot);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
    LivePhysRegs LiveRegs;
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
    for (MachineInstr &MI : *RoundMBB) {
    for (MachineInstr &MI : *LoopMBB) {
    for (MachineInstr &MI :
void X86FrameLowering::emitStackProbeCall(
    std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
                       "code model and indirect thunks not yet implemented.");
         "Stack probe calls will clobber live EFLAGS.");
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
    CallOp = X86::CALLpcrel32;
  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);
  MachineInstrBuilder CI;
  MachineInstr *ModInst = CI;
  if (STI.isTargetWin64() || !STI.isOSWindows()) {
  if (STI.isTargetWin64() || !STI.isOSWindows()) {
  for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
// calculateSetFPREG (fragments)
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  return SEHFrameOffset & -16;
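// Illustration (not LLVM code): calculateSetFPREG clamps the SP adjustment to
// the 128-byte SEH limit and rounds it down to a 16-byte boundary. Standalone
// check of that arithmetic.
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint64_t calcSetFPREG(uint64_t SPAdjust) {
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  return SEHFrameOffset & ~uint64_t(15); // same effect as "& -16" here
}

int main() {
  assert(calcSetFPREG(40) == 32);    // small adjustment, rounded down
  assert(calcSetFPREG(1000) == 128); // clamped to the Win64 SEH maximum
}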
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
      MaxAlign = (MaxAlign > 16) ? MaxAlign : Align(16);
      MaxAlign = Align(16);
  return MaxAlign.value();
// BuildStackAlignAND (fragments)
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
  if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
    NumFrameLoopProbe++;
    MachineBasicBlock *entryMBB =
    MachineBasicBlock *headMBB =
    MachineBasicBlock *bodyMBB =
    MachineBasicBlock *footMBB =
    MF.insert(MBBIter, entryMBB);
    MF.insert(MBBIter, headMBB);
    MF.insert(MBBIter, bodyMBB);
    MF.insert(MBBIter, footMBB);
    const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
    BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
    BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
        .addReg(FinalStackProbed)
      MI->getOperand(3).setIsDead();
        .addReg(FinalStackProbed)
        .addReg(FinalStackProbed)
        .addReg(FinalStackProbed)
        .addReg(FinalStackProbed)
    MI->getOperand(3).setIsDead();
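// Illustration (not LLVM code): BuildStackAlignAND realigns the stack with
// "SP &= -MaxAlign"; ANDing with the two's complement of a power-of-two
// alignment rounds the pointer down to the next aligned address. Standalone
// check of that bit trick (MaxAlign is assumed to be a power of two).
#include <cassert>
#include <cstdint>

static uint64_t alignSPDown(uint64_t SP, uint64_t MaxAlign) {
  uint64_t Val = -MaxAlign; // e.g. MaxAlign = 32 -> 0xffff...ffe0
  return SP & Val;          // rounds SP down to a MaxAlign boundary
}

int main() {
  assert(alignSPDown(0x7fff1238, 32) == 0x7fff1220);
  assert(alignSPDown(0x7fff1240, 32) == 0x7fff1240); // already aligned
}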
         "MF used frame lowering for wrong subtarget");

bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {

bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {

// isOpcodeRep (fragments)
  case X86::REPNE_PREFIX:
  case X86::REP_MOVSB_32:
  case X86::REP_MOVSB_64:
  case X86::REP_MOVSD_32:
  case X86::REP_MOVSD_64:
  case X86::REP_MOVSQ_32:
  case X86::REP_MOVSQ_64:
  case X86::REP_MOVSW_32:
  case X86::REP_MOVSW_64:
  case X86::REP_PREFIX:
  case X86::REP_STOSB_32:
  case X86::REP_STOSB_64:
  case X86::REP_STOSD_32:
  case X86::REP_STOSD_64:
  case X86::REP_STOSQ_32:
  case X86::REP_STOSQ_64:
  case X86::REP_STOSW_32:
  case X86::REP_STOSW_64:
// emitPrologue (fragments)
         "MF used frame lowering for wrong subtarget");
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  bool IsFunclet = MBB.isEHFuncletEntry();
  bool FnHasClrFunclet =
  bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = isWin64Prologue(MF);
  bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() &&
  bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
  bool NeedsDwarfCFI = needsDwarfCFI(MF);
  bool HasWinCFI = false;
    ArgBaseReg = MI->getOperand(0).getReg();
    if (NeedsDwarfCFI) {
      unsigned DwarfStackPtr = TRI->getDwarfRegNum(ArgBaseReg, true);
  if (TailCallArgReserveSize && IsWin64Prologue)
  const bool EmitStackProbeCall =
      STI.getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);
    if (STI.swiftAsyncContextIsDynamicallySet()) {
          .addUse(X86::NoRegister);
        "win64 prologue does not set the bit 60 in the saved frame pointer");
      !EmitStackProbeCall &&
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
  if (TailCallArgReserveSize != 0) {
    BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    MBB.addLiveIn(Establisher);
  if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
    NumBytes = alignTo(NumBytes, MaxAlign);
  if (NeedsDwarfCFI && !ArgBaseReg.isValid()) {
            nullptr, -2 * stackGrowth + (int)TailCallArgReserveSize),
    unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
                                     (int)TailCallArgReserveSize),
    assert(!IsWin64Prologue &&
           "win64 prologue does not store async context right below rbp");
    if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
      MBB.addLiveIn(X86::R14);
  if (!IsWin64Prologue && !IsFunclet) {
    if (NeedsDwarfCFI) {
      CfaExpr.push_back(dwarf::DW_CFA_expression);
      unsigned DwarfReg = TRI->getDwarfRegNum(MachineFramePtr, true);
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
    assert(!IsFunclet && "funclets without FPs not yet implemented");
  if (HasFP && TRI->hasStackRealignment(MF))
  unsigned ParentFrameNumBytes = NumBytes;
    NumBytes = getWinEHFuncletFrameSize(MF);
  bool PushedRegs = false;
    unsigned Opc = MBBI->getOpcode();
    return Opc == X86::PUSH32r || Opc == X86::PUSH64r || Opc == X86::PUSHP64r ||
           Opc == X86::PUSH2 || Opc == X86::PUSH2P;
  while (IsCSPush(MBBI)) {
    unsigned Opc = LastCSPush->getOpcode();
    if (!HasFP && NeedsDwarfCFI) {
      if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
      if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
            .addImm(LastCSPush->getOperand(1).getReg())
  if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF) &&
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    NumBytes = mergeSPUpdates(
  uint64_t AlignedNumBytes = NumBytes;
  if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
    AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
  if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
           "The Red Zone is not accounted for in stack probes");
    int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
  } else if (NumBytes) {
  if (NeedsWinCFI && NumBytes) {
  int SEHFrameOffset = 0;
    unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
      MBB.addLiveIn(Establisher);
                   Establisher, false, PSPSlotOffset)
                   false, PSPSlotOffset)
    SPOrEstablisher = Establisher;
  if (IsWin64Prologue && HasFP) {
                 SPOrEstablisher, false, SEHFrameOffset);
          .addReg(SPOrEstablisher);
    if (NeedsWinCFI && !IsFunclet) {
      assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
  } else if (IsFunclet && STI.is32Bit()) {
    if (!MBB.isCleanupFuncletEntry()) {
      if (Register Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
        if (X86::FR64RegClass.contains(Reg)) {
          if (IsWin64Prologue && IsFunclet)
          assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
  if (NeedsWinCFI && HasWinCFI)
  if (FnHasClrFunclet && !IsFunclet) {
    unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
  if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
  if (IsFunclet && STI.is32Bit())
  if (TRI->hasBasePointer(MF)) {
      assert(UsedReg == BasePtr);
      int FI = MI->getOperand(1).getIndex();
      unsigned MOVmr = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    if (!HasFP && NumBytes) {
  bool NeedsCLD = false;
    if (MI.isInlineAsm()) {
    switch (MI.getOpcode()) {
    case X86::CLEANUPRET:
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
  return static_cast<unsigned>(Offset);

X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
      WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
    UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
  return FrameSizeMinusRBP + XMMSize - CSSize;

// isTailCallOpcode (fragment)
  return Opc == X86::TCRETURNri || Opc == X86::TCRETURN_WIN64ri ||
         Opc == X86::TCRETURN_HIPE32ri || Opc == X86::TCRETURNdi ||
         Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
         Opc == X86::TCRETURNri64_ImpCall || Opc == X86::TCRETURNdi64 ||
         Opc == X86::TCRETURNmi64;
// emitEpilogue (fragments)
    DL = MBBI->getDebugLoc();
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  bool NeedsWin64CFI =
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  bool HasFP = hasFP(MF);
  bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
    unsigned Opc = X86::LEA32r;
    ArgBaseReg = MI->getOperand(0).getReg();
    if (STI.is64Bit()) {
      StackReg = X86::RSP;
    if (NeedsDwarfCFI) {
      unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackReg, true);
    assert(HasFP && "EH funclets without FP not yet implemented");
    NumBytes = getWinEHFuncletFrameSize(MF);
    NumBytes = FrameSize - CSSize - TailCallArgReserveSize;
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(FrameSize, MaxAlign);
    NumBytes = StackSize - CSSize - TailCallArgReserveSize;
  uint64_t SEHStackAllocAmt = NumBytes;
    if (NeedsDwarfCFI) {
      unsigned DwarfStackPtr =
          TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
      if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
        unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
          (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::BTR64ri8 &&
           Opc != X86::ADD64ri32 && Opc != X86::POPP64r && Opc != X86::POP2 &&
           Opc != X86::POP2P && Opc != X86::LEA64r))
    int FI = MI->getOperand(1).getIndex();
    unsigned MOVrm = Is64Bit ? X86::MOV64rm : X86::MOV32rm;
  if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
    emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
    DL = MBBI->getDebugLoc();
    if (TRI->hasStackRealignment(MF))
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
    if (LEAAmount != 0) {
  } else if (NumBytes) {
    if (!HasFP && NeedsDwarfCFI) {
              nullptr, CSSize + TailCallArgReserveSize + SlotSize),
  if (!HasFP && NeedsDwarfCFI) {
      unsigned Opc = PI->getOpcode();
      if (Opc == X86::POP32r || Opc == X86::POP64r || Opc == X86::POPP64r ||
          Opc == X86::POP2 || Opc == X86::POP2P) {
        if (Opc == X86::POP2 || Opc == X86::POP2P)
  if (NeedsDwarfCFI && !MBB.succ_empty())
    assert(Delta <= 0 && "TCDelta should never be positive");
// getFrameIndexReference and related helpers (fragments)
  if (TRI->hasBasePointer(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
  else if (TRI->hasStackRealignment(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
    FrameReg = TRI->getFrameRegister(MF);
  int64_t FPDelta = 0;
  if (IsWin64Prologue) {
    uint64_t NumBytes = FrameSize - CSSize;
    FPDelta = FrameSize - SEHFrameOffset;
           "FPDelta isn't aligned per the Win64 ABI!");
  if (FrameReg == TRI->getFramePtr()) {
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF))
  const auto it = WinEHXMMSlotInfo.find(FI);
  if (it == WinEHXMMSlotInfo.end())
  FrameReg = TRI->getStackRegister();

                                       int Adjustment) const {
  FrameReg = TRI->getStackRegister();

                                       bool IgnoreSPUpdates) const {
      !STI.isTargetWin64())
         "we don't handle this case!");
// assignCalleeSavedSpillSlots (fragments)
                                std::vector<CalleeSavedInfo> &CSI) const {
  unsigned CalleeSavedFrameSize = 0;
  unsigned XMMCalleeSavedFrameSize = 0;
  if (TailCallReturnAddrDelta < 0) {
        TailCallReturnAddrDelta - SlotSize, true);
  if (this->TRI->hasBasePointer(MF)) {
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
        CSI.erase(CSI.begin() + i);
  unsigned NumRegsForPush2 = 0;
      return X86::GR64RegClass.contains(I.getReg());
    bool NeedPadding = (SpillSlotOffset % 16 != 0) && (NumCSGPR % 2 == 0);
    bool UsePush2Pop2 = NeedPadding ? NumCSGPR > 2 : NumCSGPR > 1;
    NumRegsForPush2 = UsePush2Pop2 ? alignDown(NumCSGPR, 2) : 0;
          (SpillSlotOffset % 16 == 0 ||
             "Expect even candidates for push2/pop2");
      ++NumFunctionUsingPush2Pop2;
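// Illustration (not LLVM code): push2/pop2 handle callee-saved GPRs in pairs, so
// the lowering decides how many registers can be paired and whether a padding
// slot is needed to keep the pairs 16-byte aligned. Standalone sketch mirroring
// the NeedPadding / UsePush2Pop2 / NumRegsForPush2 decision above.
#include <cassert>
#include <cstdint>

struct Push2Plan { bool NeedPadding; bool UsePush2Pop2; unsigned NumRegsForPush2; };

static Push2Plan planPush2(int64_t SpillSlotOffset, unsigned NumCSGPR) {
  Push2Plan P;
  P.NeedPadding = (SpillSlotOffset % 16 != 0) && (NumCSGPR % 2 == 0);
  P.UsePush2Pop2 = P.NeedPadding ? NumCSGPR > 2 : NumCSGPR > 1;
  P.NumRegsForPush2 = P.UsePush2Pop2 ? (NumCSGPR & ~1u) : 0; // alignDown(N, 2)
  return P;
}

int main() {
  assert(planPush2(-8, 4).NeedPadding);            // misaligned start, even count
  assert(planPush2(-16, 3).NumRegsForPush2 == 2);  // one pair, one single push
}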
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
    unsigned Size = TRI->getSpillSize(*RC);
    Align Alignment = TRI->getSpillAlign(*RC);
    assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86");
    SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
    SpillSlotOffset -= Size;
    if (X86::VR128RegClass.contains(Reg)) {
      WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
      XMMCalleeSavedFrameSize += Size;
// spillCalleeSavedRegisters (fragments)
  if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
  auto UpdateLiveInCheckCanKill = [&](Register Reg) {
    if (MRI.isLiveIn(Reg))
      if (MRI.isLiveIn(*AReg))
  auto UpdateLiveInGetKillRegState = [&](Register Reg) {
  for (auto RI = CSI.rbegin(), RE = CSI.rend(); RI != RE; ++RI) {
          .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
          .addReg(Reg2, UpdateLiveInGetKillRegState(Reg2))
          .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
    unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
    Register BaseReg = this->TRI->getBaseRegister();
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
    TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
              MBB.getParent()->getFunction().getPersonalityFn())) &&
         "SEH should not use CATCHRET");
// restoreCalleeSavedRegisters (fragments)
  if (STI.is64Bit()) {
  if (MI->getOpcode() == X86::CATCHRET) {
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
    TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI,
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  Register BaseReg = this->TRI->getBaseRegister();
  for (auto I = CSI.begin(), E = CSI.end(); I != E; ++I) {
// determineCalleeSaves (fragments)
  if (TRI->hasBasePointer(MF)) {
    if (STI.isTarget64BitILP32())
    SavedRegs.set(BasePtr);

// HasNestArgument (fragment)
    if (I->hasNestAttr() && !I->use_empty())

// GetScratchRegister (fragments)
      return Primary ? X86::R14 : X86::R13;
      return Primary ? X86::EBX : X86::EDI;
      return Primary ? X86::R11 : X86::R12;
      return Primary ? X86::R11D : X86::R12D;
           "nested function.");
      return Primary ? X86::EAX : X86::ECX;
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
// adjustForSegmentedStacks (fragments)
  unsigned TlsReg, TlsOffset;
  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
         "Scratch register is live-in");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
  bool IsNested = false;
  for (const auto &LI : PrologueMBB.liveins()) {
    if (STI.isTargetLinux()) {
      TlsOffset = IsLP64 ? 0x70 : 0x40;
    } else if (STI.isTargetDarwin()) {
      TlsOffset = 0x60 + 90 * 8;
    } else if (STI.isTargetWin64()) {
    } else if (STI.isTargetFreeBSD()) {
    } else if (STI.isTargetDragonFly()) {
    if (CompareStackPointer)
      ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
    if (STI.isTargetLinux()) {
    } else if (STI.isTargetDarwin()) {
      TlsOffset = 0x48 + 90 * 4;
    } else if (STI.isTargetWin32()) {
    } else if (STI.isTargetDragonFly()) {
    } else if (STI.isTargetFreeBSD()) {
    if (CompareStackPointer)
      ScratchReg = X86::ESP;
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg)
    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
        STI.isTargetDragonFly()) {
    } else if (STI.isTargetDarwin()) {
      unsigned ScratchReg2;
      if (CompareStackPointer) {
        SaveScratch2 = false;
             "Scratch register is live-in and not saved");
      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
    const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
    const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
    const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
    if (STI.useIndirectThunkCalls())
                         "code model and thunks not yet implemented.");
  BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
#ifdef EXPENSIVE_CHECKS

// getHiPELiteral (fragments)
  for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
    if (Node->getNumOperands() != 2)
    if (!NodeName || !NodeVal)
    if (ValConst && NodeName->getString() == LiteralName) {
                     " required but not provided");

    return MI.isMetaInstruction();

// adjustForHiPEPrologue (fragments)
  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
  if (!HiPELiteralsMD)
        "Can't generate HiPE prologue without runtime parameters");
      HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
         "HiPE prologue is only supported on Linux operating systems.");
    unsigned MoreStackForCalls = 0;
    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        if (F->getName().contains("erlang.") || F->getName().contains("bif_") ||
        unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs
                                      ? F->arg_size() - CCRegisteredArgs
        if (HipeLeafWords - 1 > CalleeStkArity)
              std::max(MoreStackForCalls,
                       (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
    MaxStack += MoreStackForCalls;
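// Illustration (not LLVM code): the HiPE prologue reserves extra stack when a
// callee takes fewer stack arguments than the "leaf words" the runtime
// guarantees. Standalone version of that per-call-site arithmetic; the numbers
// in main() are hypothetical example inputs.
#include <algorithm>
#include <cassert>

static unsigned extraStackForCall(unsigned HipeLeafWords, unsigned CCRegisteredArgs,
                                  unsigned CalleeArgCount, unsigned SlotSize,
                                  unsigned MoreStackForCalls) {
  unsigned CalleeStkArity =
      CalleeArgCount > CCRegisteredArgs ? CalleeArgCount - CCRegisteredArgs : 0;
  if (HipeLeafWords - 1 > CalleeStkArity)
    MoreStackForCalls = std::max(
        MoreStackForCalls, (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
  return MoreStackForCalls;
}

int main() {
  // e.g. 6 register args, 8-byte slots, a callee taking 7 arguments
  assert(extraStackForCall(/*HipeLeafWords=*/24, 6, 7, 8, 0) == (24 - 1 - 1) * 8);
}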
  if (MaxStack > Guaranteed) {
    for (const auto &LI : PrologueMBB.liveins()) {
    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
           "HiPE prologue scratch register is live-in");
                 PReg, false, SPLimitOffset);
                 PReg, false, SPLimitOffset);
#ifdef EXPENSIVE_CHECKS
// adjustStackWithPops (fragments)
  if (NumPops != 1 && NumPops != 2)
  if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
  unsigned FoundRegs = 0;
      Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
  for (auto Candidate : RegClass) {
    if (MRI.isReserved(Candidate))
      if (MO.isReg() && MO.isDef() &&
          TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
    Regs[FoundRegs++] = Candidate;
    if (FoundRegs == (unsigned)NumPops)
  while (FoundRegs < (unsigned)NumPops)
    Regs[FoundRegs++] = Regs[0];
  for (int i = 0; i < NumPops; ++i)
// eliminateCallFramePseudoInstr (fragments)
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
  if (!reserveCallFrame) {
    bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
    if (HasDwarfEHHandlers && !isDestroy &&
    Amount -= InternalAmt;
    int64_t StackAdjustment = isDestroy ? Amount : -Amount;
    int64_t CfaAdjustment = StackAdjustment;
    if (StackAdjustment) {
      auto CalcNewOffset = [&StackAdjustment](int64_t Offset) {
        return StackAdjustment + Offset;
          mergeSPUpdates(MBB, InsertPos, CalcCfaAdjust, CalcNewOffset, true);
          mergeSPUpdates(MBB, InsertPos, CalcCfaAdjust, CalcNewOffset, false);
      if (StackAdjustment) {
        if (!(F.hasMinSize() &&
              adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
          BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
    while (CI != B && !std::prev(CI)->isCall())
      BuildStackAdjustment(MBB, CI, DL, -InternalAmt, false);
// canUseAsPrologue / canUseAsEpilogue (fragments)
  assert(MBB.getParent() && "Block is not attached to a function!");
  if (!MBB.isLiveIn(X86::EFLAGS))
  if (TLI.hasInlineStackProbe(MF) || TLI.hasStackProbeSymbol(MF))

  assert(MBB.getParent() && "Block is not attached to a function!");
  if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())

  bool CompactUnwind =
// restoreWin32EHStackPointers (fragments)
  assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
  assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
         "restoring EBP/ESI on non-32-bit target");
  int EHRegSize = MFI.getObjectSize(FI);
                   X86::EBP, true, -EHRegSize)
    int EndOffset = -EHRegOffset - EHRegSize;
           "end of registration object above normal EBP position!");
  } else if (UsedReg == BasePtr) {
    assert(UsedReg == BasePtr);

  return TRI->getSlotSize();

// getDwarfFrameBase (fragments)
    FrameBase.Kind = DwarfFrameBase::CFA;
  return DwarfFrameBase{DwarfFrameBase::Register, {FrameRegister}};
struct X86FrameSortingObject {
  bool IsValid = false;
  unsigned ObjectIndex = 0;
  unsigned ObjectSize = 0;
  unsigned ObjectNumUses = 0;

struct X86FrameSortingComparator {
  inline bool operator()(const X86FrameSortingObject &A,
                         const X86FrameSortingObject &B) const {
    uint64_t DensityAScaled, DensityBScaled;
    DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
    DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
    if (DensityAScaled == DensityBScaled)
      return A.ObjectAlignment < B.ObjectAlignment;
    return DensityAScaled < DensityBScaled;
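// Illustration (not LLVM code): frame objects are ordered by "density"
// (uses per byte), computed in scaled integer form (cross-multiplication) to
// avoid division; ties fall back to alignment. Standalone sketch of such a
// comparator used with std::sort; field names here are simplified.
#include <algorithm>
#include <cstdint>
#include <vector>

struct FrameObj { unsigned Index, Size, Align, NumUses; };

static bool lessDense(const FrameObj &A, const FrameObj &B) {
  // Compare NumUsesA/SizeA with NumUsesB/SizeB without floating point.
  uint64_t DensityAScaled = uint64_t(A.NumUses) * B.Size;
  uint64_t DensityBScaled = uint64_t(B.NumUses) * A.Size;
  if (DensityAScaled == DensityBScaled)
    return A.Align < B.Align;
  return DensityAScaled < DensityBScaled; // ascending: densest objects end up last
}

int main() {
  std::vector<FrameObj> Objs = {{0, 16, 8, 1}, {1, 4, 4, 8}, {2, 8, 8, 2}};
  std::sort(Objs.begin(), Objs.end(), lessDense);
}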
// orderFrameObjects (fragments)
  if (ObjectsToAllocate.empty())
  for (auto &Obj : ObjectsToAllocate) {
    SortingObjects[Obj].IsValid = true;
    SortingObjects[Obj].ObjectIndex = Obj;
    if (ObjectSize == 0)
      SortingObjects[Obj].ObjectSize = 4;
      SortingObjects[Obj].ObjectSize = ObjectSize;
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        int Index = MO.getIndex();
            SortingObjects[Index].IsValid)
          SortingObjects[Index].ObjectNumUses++;
  for (auto &Obj : SortingObjects) {
      ObjectsToAllocate[i++] = Obj.ObjectIndex;
  if (!TRI->hasStackRealignment(MF) && hasFP(MF))
    std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());

// processFunctionBeforeFrameFinalized (fragment)
    Offset += getWinEHFuncletFrameSize(MF);
    adjustFrameForMsvcCxxEh(MF);

void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
  int64_t MinFixedObjOffset = -SlotSize;
      int FrameIndex = H.CatchObj.FrameIndex;
      if ((FrameIndex != INT_MAX) && MFI.getObjectOffset(FrameIndex) == 0) {
        MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
  MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
  int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
  MachineBasicBlock &MBB = MF.front();
    MI->eraseFromParent();

    bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
// computeFPBPAlignmentGap (fragments)
                                      unsigned NumSpilledRegs) {
  unsigned AllocSize = TRI->getSpillSize(*RC) * NumSpilledRegs;
  unsigned AlignedSize = alignTo(AllocSize, StackAlign);
  return AlignedSize - AllocSize;
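// Illustration (not LLVM code): the FP/BP spill area is padded so the total
// allocation stays stack-aligned; the gap is "round up, then subtract".
// Standalone check of that computation.
#include <cassert>

static unsigned alignmentGap(unsigned SpillSize, unsigned NumSpilledRegs,
                             unsigned StackAlign) {
  unsigned AllocSize = SpillSize * NumSpilledRegs;
  unsigned AlignedSize = (AllocSize + StackAlign - 1) / StackAlign * StackAlign;
  return AlignedSize - AllocSize; // bytes of padding needed
}

int main() {
  assert(alignmentGap(8, 1, 16) == 8); // one 8-byte spill on a 16-byte-aligned stack
  assert(alignmentGap(8, 2, 16) == 0); // two spills already fill 16 bytes
}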
// spillFPBPUsingSP / restoreFPBPUsingSP (fragments)
                                        int SPAdjust) const {
  MachineBasicBlock *MBB = BeforeMI->getParent();
  if (FP.isValid() && needsDwarfCFI(MF)) {
    SmallString<64> CfaExpr;
      Offset += TRI->getSpillSize(*TRI->getMinimalPhysRegClass(BP));
    if (TII.isFrameSetup(*BeforeMI)) {
      BeforeMI = std::next(BeforeMI);
    if (STI.isTarget64BitILP32())
    unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackPtr, true);
    CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfStackPtr));
    CfaExpr.push_back((uint8_t)dwarf::DW_OP_plus);
    SmallString<64> DefCfaExpr;
    DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);

                                          int SPAdjust) const {
  MachineBasicBlock *MBB = AfterMI->getParent();
  if (needsDwarfCFI(MF)) {
void X86FrameLowering::saveAndRestoreFPBPUsingSP(
  assert(SpillFP || SpillBP);
  const TargetRegisterClass *RC;
  unsigned NumRegs = 0;
    FP = TRI->getFrameRegister(MF);
    if (STI.isTarget64BitILP32())
    RC = TRI->getMinimalPhysRegClass(FP);
    BP = TRI->getBaseRegister();
    if (STI.isTarget64BitILP32())
    RC = TRI->getMinimalPhysRegClass(BP);
  spillFPBPUsingSP(MF, BeforeMI, FP, BP, SPAdjust);
  restoreFPBPUsingSP(MF, AfterMI, FP, BP, SPAdjust);
bool X86FrameLowering::skipSpillFPBP(
  if (MI->getOpcode() == X86::LCMPXCHG16B_SAVE_RBX) {
    while (!(MI->getOpcode() == TargetOpcode::COPY &&
             MI->getOperand(1).getReg() == X86::RBX) &&
           !((Reg = TII.isStoreToStackSlot(*MI, FI)) && Reg == X86::RBX))

// isFPBPAccess (fragments)
  AccessFP = AccessBP = false;
  if (MI.findRegisterUseOperandIdx(FP, TRI, false) != -1 ||
      MI.findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
  if (MI.findRegisterUseOperandIdx(BP, TRI, false) != -1 ||
      MI.findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
  return AccessFP || AccessBP;
// isInvoke (fragment)
  if (!MBB->hasEHPadSuccessor())

void X86FrameLowering::checkInterferedAccess(
    bool SpillBP) const {
  if (DefMI == KillMI)
  if (TRI->hasBasePointer(MF)) {
                     [](const MachineOperand &MO) { return MO.isFI(); }))
               "Interference usage of base pointer/frame "

// spillFPBP (fragments)
  FP = TRI->getFrameRegister(MF);
  if (TRI->hasBasePointer(MF))
    BP = TRI->getBaseRegister();
    bool InsideEHLabels = false;
    auto MI = MBB.rbegin(), ME = MBB.rend();
    auto TermMI = MBB.getFirstTerminator();
    if (TermMI == MBB.begin())
      MI = *(std::prev(TermMI));
           isInvoke(*MI, InsideEHLabels) || skipSpillFPBP(MF, MI)) {
      if (MI->getOpcode() == TargetOpcode::EH_LABEL) {
        InsideEHLabels = !InsideEHLabels;
      bool AccessFP, AccessBP;
      bool FPLive = false, BPLive = false;
      bool SpillFP = false, SpillBP = false;
        SpillFP |= AccessFP;
        SpillBP |= AccessBP;
        if (FPLive && MI->findRegisterDefOperandIdx(FP, TRI, false, true) != -1)
        if (FP && MI->findRegisterUseOperandIdx(FP, TRI, false) != -1)
        if (BPLive && MI->findRegisterDefOperandIdx(BP, TRI, false, true) != -1)
        if (BP && MI->findRegisterUseOperandIdx(BP, TRI, false) != -1)
      } while ((MI != ME) &&
               (FPLive || BPLive ||
      if (FPLive && !SpillBP)
      if (KillMI->isCall() && DefMI != ME) {
        auto FrameSetup = std::next(DefMI);
        while (FrameSetup != ME && !TII.isFrameSetup(*FrameSetup) &&
               !FrameSetup->isCall())
        if (FrameSetup != ME && TII.isFrameSetup(*FrameSetup) &&
            (TII.getFrameSize(*FrameSetup) ||
             TII.getFrameAdjustment(*FrameSetup))) {
          while (!TII.isFrameInstr(*KillMI))
      checkInterferedAccess(MF, DefMI, KillMI, SpillFP, SpillBP);
      saveAndRestoreFPBPUsingSP(MF, &(*DefMI), &(*KillMI), SpillFP, SpillBP);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool isFuncletReturnInstr(const MachineInstr &MI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static const uint64_t kSplitStackAvailable
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static bool isTailCallOpcode(unsigned Opc)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static constexpr MCPhysReg FPReg
static constexpr MCPhysReg SPReg
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool is64Bit(const char *name)
static unsigned calculateSetFPREG(uint64_t SPAdjust)
static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary)
GetScratchRegister - Get a temp register for performing work in the segmented stack and the Erlang/Hi...
static unsigned getADDriOpcode(bool IsLP64)
static unsigned getPUSH2Opcode(const X86Subtarget &ST)
static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm)
static unsigned getLEArOpcode(bool IsLP64)
static unsigned getSUBriOpcode(bool IsLP64)
static bool flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB)
Check if the flags need to be preserved before the terminators.
static bool isFPBPAccess(const MachineInstr &MI, Register FP, Register BP, const TargetRegisterInfo *TRI, bool &AccessFP, bool &AccessBP)
static bool isOpcodeRep(unsigned Opcode)
Return true if an opcode is part of the REP group of instructions.
static unsigned getANDriOpcode(bool IsLP64, int64_t Imm)
static bool isEAXLiveIn(MachineBasicBlock &MBB)
static int computeFPBPAlignmentGap(MachineFunction &MF, const TargetRegisterClass *RC, unsigned NumSpilledRegs)
static unsigned getADDrrOpcode(bool IsLP64)
constexpr int64_t MaxSPChunk
static bool HasNestArgument(const MachineFunction *MF)
static unsigned getPOPOpcode(const X86Subtarget &ST)
static bool isInvoke(const MachineInstr &MI, bool InsideEHLabels)
static unsigned getPOP2Opcode(const X86Subtarget &ST)
static unsigned getHiPELiteral(NamedMDNode *HiPELiteralsMD, const StringRef LiteralName)
Lookup an ERTS parameter in the !hipe.literals named metadata node.
static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI)
static unsigned getSUBrrOpcode(bool IsLP64)
static unsigned getPUSHOpcode(const X86Subtarget &ST)
static const unsigned FramePtr
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
reverse_iterator rend() const
bool empty() const
empty - Check if the array is empty.
reverse_iterator rbegin() const
iterator_range< const_set_bits_iterator > set_bits() const
static BranchProbability getOne()
static BranchProbability getZero()
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool needsUnwindTableEntry() const
True if this function needs an unwind table.
const Argument * const_arg_iterator
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Module * getParent()
Get the module that this global value is contained inside of...
bool usesWindowsCFI() const
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_def_cfa_register modifies a rule for computing CFA.
static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int64_t Size, SMLoc Loc={})
A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction createRememberState(MCSymbol *L, SMLoc Loc={})
.cfi_remember_state Save all current rules for all registers.
OpType getOperation() const
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
static MCCFIInstruction createRestoreState(MCSymbol *L, SMLoc Loc={})
.cfi_restore_state Restore the previously saved state.
const MCObjectFileInfo * getObjectFileInfo() const
const MCRegisterInfo * getRegisterInfo() const
LLVM_ABI void reportError(SMLoc L, const Twine &Msg)
MCSection * getCompactUnwindSection() const
MCRegAliasIterator enumerates all registers aliasing Reg.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
LLVM_ABI StringRef getString() const
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
MachineInstrBundleIterator< const MachineInstr > const_iterator
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const
Return whether (physical) register Reg has been defined and not killed as of just before Before.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Live
Register is known to be (at least partially) live.
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool needsSplitStackProlog() const
Return true if this function requires a split stack prolog, even if it uses no stack space.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
LLVM_ABI void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
void setCVBytesOfCalleeSavedRegisters(unsigned S)
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasCopyImplyingStackAdjustment() const
Returns true if the function contains operations which will lower down to instructions which manipula...
bool hasStackObjects() const
Return true if there are any stack objects in this function.
LLVM_ABI int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)
Create a spill slot at a fixed location on the stack.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getObjectIndexBegin() const
Return the minimum frame object index.
void setOffsetAdjustment(int64_t Adj)
Set the correction for frame offsets.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const std::vector< MCCFIInstruction > & getFrameInstructions() const
Returns a reference to a list of cfi instructions in the function's prologue.
bool hasInlineAsm() const
Returns true if the function contains any inline assembly.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool callsUnwindInit() const
void push_front(MachineBasicBlock *MBB)
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
bool callsEHReturn() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const std::vector< LandingPadInfo > & getLandingPads() const
Return a reference to the landing pad info for the current function.
BasicBlockListType::iterator iterator
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getNumOperands() const
Retuns the total number of operands.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
LLVM_ABI bool isLiveIn(Register Reg) const
NamedMDNode * getNamedMetadata(StringRef Name) const
Return the first NamedMDNode in the module with the specified name.
unsigned getCodeViewFlag() const
Returns the CodeView Version by checking module flags.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
LLVM_ABI MDNode * getOperand(unsigned i) const
LLVM_ABI unsigned getNumOperands() const
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SlotIndex - An opaque wrapper around machine indexes.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
void append(StringRef RHS)
Append from a StringRef.
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
static StackOffset getFixed(int64_t Fixed)
StringRef - Represent a constant reference to a string, i.e.
static constexpr size_t npos
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
TargetFrameLowering(StackDirection D, Align StackAl, int LAO, Align TransAl=Align(1), bool StackReal=true)
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
const Triple & getTargetTriple() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual Register getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
bool isUEFI() const
Tests whether the OS is UEFI.
bool isOSWindows() const
Tests whether the OS is Windows.
bool has128ByteRedZone(const MachineFunction &MF) const
Return true if the function has a redzone (accessible bytes past the frame of the top of stack functi...
void spillFPBP(MachineFunction &MF) const override
If a function uses a base pointer and the base pointer is clobbered by inline asm, RA doesn't detect th...
bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override
canSimplifyCallFramePseudos - If there is a reserved call frame, the call frame pseudos can be simpli...
bool needsFrameIndexResolution(const MachineFunction &MF) const override
X86FrameLowering(const X86Subtarget &STI, MaybeAlign StackAlignOverride)
const X86RegisterInfo * TRI
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
bool hasFPImpl(const MachineFunction &MF) const override
hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.
MachineBasicBlock::iterator restoreWin32EHStackPointers(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP=false) const
Sets up EBP and optionally ESI based on the incoming EBP value.
int getInitialCFAOffset(const MachineFunction &MF) const override
Return initial CFA offset value i.e.
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog, std::optional< MachineFunction::DebugInstrOperandPair > InstrNum=std::nullopt) const
Emit target stack probe code.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool IsPrologue) const
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int64_t mergeSPAdd(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, int64_t AddOffset, bool doMergeWithPrevious) const
Equivalent to: mergeSPUpdates(MBB, MBBI, [AddOffset](int64_t Offset) { return AddOffset + Offset; }...
StackOffset getFrameIndexReferenceSP(const MachineFunction &MF, int FI, Register &SPReg, int Adjustment) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
bool enableShrinkWrapping(const MachineFunction &MF) const override
Returns true if the target will correctly handle shrink wrapping.
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
void inlineStackProbe(MachineFunction &MF, MachineBasicBlock &PrologMBB) const override
Replace a StackProbe inline-stub with the actual probe code inline.
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &DL, int64_t NumBytes, bool InEpilogue) const
Emit a series of instructions to increment / decrement the stack pointer by a constant value.
bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as an epilogue for the target.
bool Is64Bit
Is64Bit implies that x86_64 instructions are available.
Register getInitialCFARegister(const MachineFunction &MF) const override
Return initial CFA register value i.e.
bool Uses64BitFramePtr
True if the 64-bit frame or stack pointer should be used.
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
void adjustForSegmentedStacks(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override
Adjust the prologue to have the function use segmented stacks.
DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override
Return the frame base information to be encoded in the DWARF subprogram debug info.
void emitCalleeSavedFrameMovesFullCFA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const override
Emits Dwarf Info specifying offsets of callee saved registers and frame pointer.
int getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, Register &SPReg) const
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const
Check that LEA can be used on SP in an epilogue sequence for MF.
bool stackProbeFunctionModifiesSP() const override
Does the stack probe function call return with a modified stack pointer?
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
Same as getFrameIndexReference, except that the stack pointer (as opposed to the frame pointer) will ...
void restoreWinEHStackPointersInParent(MachineFunction &MF) const
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
void adjustForHiPEPrologue(MachineFunction &MF, MachineBasicBlock &PrologueMBB) const override
Erlang programs may need a special prologue to handle the stack size they might need at runtime.
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
bool getForceFramePointer() const
void setPadForPush2Pop2(bool V)
bool isCandidateForPush2Pop2(Register Reg) const
unsigned getArgumentStackSize() const
bool getFPClobberedByCall() const
int getRestoreBasePointerOffset() const
int getSEHFramePtrSaveIndex() const
bool hasCFIAdjustCfa() const
int getTCReturnAddrDelta() const
void setRestoreBasePointer(const MachineFunction *MF)
bool getHasSEHFramePtrSave() const
DenseMap< int, unsigned > & getWinEHXMMSlotInfo()
bool getBPClobberedByCall() const
void setUsesRedZone(bool V)
bool hasPreallocatedCall() const
bool hasSwiftAsyncContext() const
void setHasSEHFramePtrSave(bool V)
bool getRestoreBasePointer() const
MachineInstr * getStackPtrSaveMI() const
size_t getNumCandidatesForPush2Pop2() const
AMXProgModelEnum getAMXProgModel() const
void addCandidateForPush2Pop2(Register Reg)
unsigned getCalleeSavedFrameSize() const
bool getHasPushSequences() const
bool padForPush2Pop2() const
void setStackPtrSaveMI(MachineInstr *MI)
bool getUsesRedZone() const
void setCalleeSavedFrameSize(unsigned bytes)
void setSEHFramePtrSaveIndex(int Index)
const X86TargetLowering * getTargetLowering() const override
bool isTargetWindowsCoreCLR() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
uint16_t StackAdjustment(const RuntimeFunction &RF)
StackAdjustment - calculated stack adjustment in words.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
@ X86_INTR
x86 hardware interrupt context.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ X86_FastCall
'fast' analog of X86_StdCall.
@ BasicBlock
Various leaf nodes.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
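A minimal sketch of the range-based wrappers (the data is made up):

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  std::vector<int> ObjectsToAllocate = {4, 1, 3, 2};
  llvm::stable_sort(ObjectsToAllocate,
                    [](int A, int B) { return A < B; });        // equal keys keep their order
  bool AllValid = llvm::all_of(ObjectsToAllocate,
                               [](int FI) { return FI >= 0; }); // true here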
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
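A minimal, hypothetical helper showing the builder interface (the X86:: names require the target-internal X86 headers, e.g. X86InstrInfo.h): build SUB64ri32 RSP, RSP, Amount in front of MBBI and tag it as frame setup.

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/CodeGen/TargetInstrInfo.h"

  static void emitSubRSP(llvm::MachineBasicBlock &MBB,
                         llvm::MachineBasicBlock::iterator MBBI,
                         const llvm::DebugLoc &DL,
                         const llvm::TargetInstrInfo &TII, int64_t Amount) {
    llvm::BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri32), X86::RSP)
        .addReg(X86::RSP)
        .addImm(Amount)
        .setMIFlag(llvm::MachineInstr::FrameSetup);
  }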
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
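A minimal sketch (the offset is made up): choosing an immediate encoding by signed width.

  #include "llvm/Support/MathExtras.h"

  int64_t Offset = 130;
  bool FitsImm8  = llvm::isInt<8>(Offset);  // false: 130 > 127
  bool FitsImm32 = llvm::isInt<32>(Offset); // true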
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
@ DwarfCFI
DWARF-like instruction based exceptions.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
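A minimal, hypothetical sketch (addFrameReference is declared in the target-internal X86InstrBuilder.h, so this only builds inside the X86 backend): reload a 64-bit value from frame index FI into Reg; the call appends the five x86 memory operands addressing [FI + 0].

  #include "X86InstrBuilder.h" // target-internal; provides addFrameReference
  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/CodeGen/TargetInstrInfo.h"

  static void reloadFromSlot(llvm::MachineBasicBlock &MBB,
                             llvm::MachineBasicBlock::iterator MBBI,
                             const llvm::DebugLoc &DL,
                             const llvm::TargetInstrInfo &TII,
                             llvm::Register Reg, int FI) {
    llvm::addFrameReference(
        llvm::BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Reg), FI);
  }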
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is congruent to Skew modulo Align.
IterT skipDebugInstructionsForward(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It until it points to a non-debug instruction or to End and return the resulting iterator.
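A minimal sketch: find the next non-debug instruction at or after It, which keeps frame-lowering decisions independent of interleaved DBG_VALUEs.

  #include "llvm/CodeGen/MachineBasicBlock.h"

  static llvm::MachineBasicBlock::iterator
  nextRealInstr(llvm::MachineBasicBlock &MBB,
                llvm::MachineBasicBlock::iterator It) {
    return llvm::skipDebugInstructionsForward(It, MBB.end());
  }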
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
@ Always
Always set the bit.
@ Never
Never set the bit.
@ DeploymentBased
Determine whether to set the bit statically or dynamically based on the deployment target.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
IterT skipDebugInstructionsBackward(IterT It, IterT Begin, bool SkipPseudoOp=true)
Decrement It until it points to a non-debug instruction or to Begin and return the resulting iterator...
unsigned getUndefRegState(bool B)
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
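A minimal sketch: each helper returns the corresponding RegState flag when its argument is true and 0 otherwise, ready to be OR-ed together for MachineInstrBuilder::addReg.

  #include "llvm/CodeGen/MachineInstrBuilder.h"

  unsigned Flags = llvm::getDefRegState(false) |
                   llvm::getKillRegState(true) |
                   llvm::getUndefRegState(false); // == llvm::RegState::Kill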
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
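A minimal sketch tying together the alignment helpers listed in this index (the sizes are made up):

  #include "llvm/Support/Alignment.h"
  #include "llvm/Support/MathExtras.h"

  llvm::Align StackAlign(16);
  uint64_t RawSize = 40;
  uint64_t Padded  = llvm::alignTo(RawSize, StackAlign);           // 48
  uint64_t Trimmed = llvm::alignDown(RawSize, StackAlign.value()); // 32
  bool     Ok      = llvm::isAligned(StackAlign, Padded);          // true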
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
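A minimal sketch, assuming an llvm::Function F (EHPersonalities.h lives under llvm/IR/ in recent trees, under llvm/Analysis/ in older ones): asynchronous, SEH-style personalities are the ones that force funclet-aware frame layout.

  #include "llvm/IR/EHPersonalities.h"
  #include "llvm/IR/Function.h"

  static bool usesAsyncEH(const llvm::Function &F) {
    llvm::EHPersonality Pers = llvm::classifyEHPersonality(
        F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr);
    return llvm::isAsynchronousEHPersonality(Pers);
  }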
unsigned encodeSLEB128(int64_t Value, raw_ostream &OS, unsigned PadTo=0)
Utility function to encode a SLEB128 value to an output stream.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
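A minimal sketch of these wrappers (the data is made up):

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  std::vector<int> CSRSlots = {-8, -16, -24, 4};
  bool HasPositive = llvm::any_of(CSRSlots, [](int O) { return O > 0; });         // true
  auto NumAligned  = llvm::count_if(CSRSlots, [](int O) { return O % 16 == 0; }); // 1
  auto It          = llvm::find_if(CSRSlots, [](int O) { return O > 0; });        // -> 4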
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
unsigned encodeULEB128(uint64_t Value, raw_ostream &OS, unsigned PadTo=0)
Utility function to encode a ULEB128 value to an output stream.
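A minimal sketch: encode into an in-memory buffer through raw_svector_ostream (the values are the usual textbook examples).

  #include "llvm/ADT/SmallString.h"
  #include "llvm/Support/LEB128.h"
  #include "llvm/Support/raw_ostream.h"

  llvm::SmallString<16> Buf;
  llvm::raw_svector_ostream OS(Buf);
  llvm::encodeULEB128(624485u, OS); // emits 0xE5 0x8E 0x26
  llvm::encodeSLEB128(-2, OS);      // emits 0x7E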
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, Register Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
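A minimal, hypothetical helper, assuming blocks that were just created or split: recompute physical-register live-ins, either for one block or iteratively for a set until the result converges.

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/CodeGen/LivePhysRegs.h"
  #include "llvm/CodeGen/MachineBasicBlock.h"

  static void refreshLiveIns(llvm::MachineBasicBlock &NewMBB,
                             llvm::ArrayRef<llvm::MachineBasicBlock *> Blocks) {
    llvm::LivePhysRegs LiveRegs;
    llvm::computeAndAddLiveIns(LiveRegs, NewMBB); // single new block
    llvm::fullyRecomputeLiveIns(Blocks);          // iterate to convergence
  }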
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
union llvm::TargetFrameLowering::DwarfFrameBase::(anonymous union) Location
enum llvm::TargetFrameLowering::DwarfFrameBase::FrameBaseKind Kind
SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap
SmallVector< WinEHHandlerType, 1 > HandlerArray