#define DEBUG_TYPE "frame-info"

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

bool AArch64PrologueEpilogueCommon::requiresGetVGCall() const {
  return AFI->hasStreamingModeChanges() &&
         /* ... */;
}

bool AArch64PrologueEpilogueCommon::isVGInstruction(
    MachineBasicBlock::iterator MBBI, const TargetLowering &TLI) const {
  unsigned Opc = MBBI->getOpcode();
  if (Opc == AArch64::CNTD_XPiI)
    return true;
  // ...
  if (Opc == AArch64::BL)
    return /* ... */;
  return Opc == TargetOpcode::COPY;
}
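// Editorial note: this classifier exists because, when a function changes
// streaming mode, the prologue must save the incoming vector granule (VG) for
// unwinding. The VG value is produced either by a CNTD (reading the register
// width directly) or, when requiresGetVGCall() holds, by a runtime-library
// call (the BL case, matched via this file's matchLibcall helper), possibly
// followed by a COPY of the result; all of those count as "VG instructions"
// and are skipped over by the save/restore rewriting below.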
static bool isPartOfSVECalleeSaves(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  // ...
  case AArch64::LD1B_2Z_IMM:
  case AArch64::ST1B_2Z_IMM:
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI:
  case AArch64::PTRUE_C_B:
  // ...
  case AArch64::SEH_SavePReg:
  case AArch64::SEH_SaveZReg:
    return true;
  }
}

static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  // ...
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI:
    return true;
  }
}
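// Editorial note: these classifiers partition the SVE callee-save region.
// Predicate-register (PPR) spills use STR_PXI/LDR_PXI, while Z-register saves
// use STR_ZXI/LDR_ZXI and the multi-vector LD1B/ST1B forms; a matching
// isPartOfZPRCalleeSaves helper is used the same way (see the epilogue code
// further down). The prologue and epilogue walkers use them to find where
// each group of saves begins and ends.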
MachineBasicBlock::iterator
AArch64PrologueEpilogueCommon::convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int CSStackSizeInc,
    bool EmitCFI, MachineInstr::MIFlag FrameFlag, int CFAOffset) const {
  // ...
  // If the function saves the incoming VG, skip past those instructions
  // first.
  if (AFL.requiresSaveVG(MF)) {
    auto &TLI = *Subtarget.getTargetLowering();
    while (isVGInstruction(MBBI, TLI))
      ++MBBI;
  }

  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
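// Illustrative effect (a sketch, not verbatim compiler output): a callee-save
// store such as
//   stp x29, x30, [sp]          ; STPXi, offset 0
// preceded by a separate `sub sp, sp, #16` is rewritten to the single
// pre-indexed form
//   stp x29, x30, [sp, #-16]!   ; STPXpre
// and the matching restore `ldp x29, x30, [sp]` + `add sp, sp, #16` becomes
// the post-indexed `ldp x29, x30, [sp], #16` (LDPXpost), folding the SP
// adjustment into the first save / last restore.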
  TypeSize Scale = TypeSize::getFixed(1), Width = TypeSize::getFixed(0);
  int64_t MinOffset, MaxOffset;
  bool Success = AArch64InstrInfo::getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  assert(Success && "unknown load/store opcode");

  // If the first store isn't right where we want SP then we can't fold the
  // update in so create a normal arithmetic instruction instead.
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset * (int64_t)Scale.getFixedValue() ||
      CSStackSizeInc > MaxOffset * (int64_t)Scale.getFixedValue()) {
    // ...
    return std::prev(MBBI);
  }

  // Get rid of the SEH code associated with the old instruction.
  if (NeedsWinCFI) {
    auto SEH = std::next(MBBI);
    if (AArch64InstrInfo::isSEHInstruction(*SEH))
      SEH->eraseFromParent();
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  // ...

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);

  // ...
  AFL.insertSEH(*MIB, *TII, FrameFlag);
  // ...
  return std::prev(MBB.erase(MBBI));
}
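// Usage note: the rewritten instruction replaces the original, so the
// function returns std::prev(MBB.erase(MBBI)), an iterator to the newly built
// pre/post-indexed instruction, letting callers continue scanning from the
// converted save/restore. The immediate is expressed in register-size units,
// hence the division by Scale after the divisibility assert.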
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
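// Editorial note: Windows unwind info is represented in MIR as pseudo SEH
// opcodes mirroring each save/restore. When the local-stack allocation is
// folded into the callee-save stores, the offsets recorded in those SEH
// pseudos must be shifted by the same amount, which is what fixupSEHOpcode
// does via ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize).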
void AArch64PrologueEpilogueCommon::fixupCalleeSaveRestoreStackOffset(
    MachineInstr &MI, uint64_t LocalStackSize) const {
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return;

  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is the immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}

bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  if (AFL.homogeneousPrologEpilog(MF))
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // ...
  if (AFL.needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
      MF.getFunction().hasOptSize())
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores.
  if (StackBumpBytes >= 512 ||
      AFL.windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  // ...
  if (AFL.canUseRedZone(MF))
    return false;

  // When there is an SVE area on the stack, always allocate the
  // callee-saves and spills/locals separately.
  if (AFI->hasSVEStackSize())
    return false;

  return true;
}

// Within the AArch64PrologueEmitter constructor:
// ...
//   EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
// ...
//   collectBlockLiveins();

void AArch64PrologueEmitter::collectBlockLiveins() {
  // Collect liveness up to the start of any existing frame-setup
  // instructions.
  PrologueEndI = MBB.begin();
  while (PrologueEndI != MBB.end() &&
         PrologueEndI->getFlag(MachineInstr::FrameSetup))
    ++PrologueEndI;

  if (PrologueEndI != MBB.end()) {
    getLivePhysRegsUpTo(*PrologueEndI, *TRI, LiveRegs);
    // ...
  }
}

void AArch64PrologueEmitter::verifyPrologueClobbers() const {
  if (PrologueEndI == MBB.end())
    return;
  for (MachineInstr &MI :
       make_range(MBB.instr_begin(), PrologueEndI->getIterator())) {
    for (auto &Op : MI.operands())
      if (Op.isReg() && Op.isDef())
        assert(!LiveRegs.contains(Op.getReg()) &&
               "live register clobbered by inserted prologue instructions");
  }
}

void AArch64PrologueEmitter::determineLocalsStackSize(
    uint64_t StackSize, uint64_t PrologueSaveSize) {
  AFI->setLocalStackSize(StackSize - PrologueSaveSize);
  CombineSPBump = shouldCombineCSRLocalStackBump(StackSize);
}

static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
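// Worked example (illustrative values): for a StackOffset of 32 fixed bytes
// plus 16 scalable bytes, upperBound returns 16 * 16 + 32 = 288. The scalable
// component is multiplied by vscale at runtime, and vscale is at most 16 (a
// 2048-bit SVE vector), so 16 bytes per scalable byte is a safe worst case
// for sizing stack probes.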
void AArch64PrologueEmitter::allocateStackSpace(
    MachineBasicBlock::iterator MBBI, int64_t RealignmentPadding,
    StackOffset AllocSize, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) {
  // ...
  const uint64_t MaxAlign = MFI.getMaxAlign().value();
  const uint64_t AndMask = ~(MaxAlign - 1);
  // ...
  Register TargetReg = RealignmentPadding
                           ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                           : AArch64::SP;
  // ...
  if (RealignmentPadding) {
    // ...
  }

  // Fixed-length allocation with stack probing: no scalable component and no
  // realignment, so a single probed-allocation pseudo suffices.
  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    Register ScratchReg = AFL.findScratchNonCalleeSaveRegister(&MBB);
    assert(ScratchReg != AArch64::NoRegister);
    // ...
        .addImm(AllocSize.getFixed())
        .addImm(InitialOffset.getFixed())
        .addImm(InitialOffset.getScalable());
    // The fixed allocation may leave unprobed bytes at the top of the stack.
    if (FollowupAllocs) {
      // STR XZR, [SP]: probe the new top of the stack.
      // ...
    }
    return;
  }

  // If the (unknown) allocation size cannot exceed the probe size, decrement
  // the stack pointer right away.
  int64_t ProbeSize = AFI->getStackProbeSize();
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    // ...
    if (RealignmentPadding) {
      // ...
      AFI->setStackRealigned(true);
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // STR XZR, [SP]
      // ...
    }
    return;
  }

  // Otherwise emit a variable-length probing loop.
  // ...
  assert(TargetReg != AArch64::NoRegister);
  // ...
  if (RealignmentPadding) {
    // ...
  }
  // ...
      .buildDefCFARegister(AArch64::SP);
  // ...
  if (RealignmentPadding)
    AFI->setStackRealigned(true);
}
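// Design summary (editorial): allocateStackSpace picks between probing
// strategies. Without inline stack probes it simply subtracts from SP (and
// realigns if requested). With probing enabled, a fixed-size allocation uses
// a single probed-allocation pseudo; a bounded variable-size allocation (at
// most one probe interval, per upperBound) decrements SP directly and probes
// the new top if follow-up allocations could land in unprobed memory; larger
// allocations fall through to a probing loop, after which the CFA is re-based
// on SP.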
void AArch64PrologueEmitter::emitPrologue() {
  // ...
  AFI->setHasRedZone(false);

  // ...
  if (AFI->getArgumentStackToRestore())
    /* ... */;

  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      // ...
    }
    // ...
  }

  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackPrologue(PrologueBeginI, DL);
    // ...
  }

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(PrologueBeginI, DL);

  // ...
  if (std::optional<int> TBPI = AFI->getTaggedBasePointerIndex())
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());

  // ...
  if (!AFI->hasStackFrame() && !AFL.windowsRequiresStackProbe(MF, NumBytes))
    return emitEmptyStackFramePrologue(NumBytes, PrologueBeginI, DL);

  const Function &F = MF.getFunction();
  bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  // ...
  bool FPAfterSVECalleeSaves =
      Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();

  if (FPAfterSVECalleeSaves && AFI->hasStackHazardSlotIndex())
    reportFatalUsageError("SME hazard padding is not supported on Windows");

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  determineLocalsStackSize(NumBytes, PrologueSaveSize);

  // ...
  if (FPAfterSVECalleeSaves) {
    // ...
    allocateStackSpace(PrologueBeginI, 0, SaveSize, false, StackOffset{},
                       /*FollowupAllocs=*/true);
    NumBytes -= FixedObject;
    // ...
    convertCalleeSaveRestoreToSPPrePostIncDec(
        MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI);
    NumBytes -= AFI->getCalleeSavedStackSize();
  } else if (CombineSPBump) {
    assert(!AFL.getSVEStackSize(MF) && "Cannot combine SP bump with SVE");
    // ...
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        PrologueBeginI, DL, -PrologueSaveSize, EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  auto &TLI = *Subtarget.getTargetLowering();
  // ...
  while (AfterGPRSavesI != EndI &&
         /* ... still within the frame-setup GPR saves ... */) {
    // ...
      fixupCalleeSaveRestoreStackOffset(*AfterGPRSavesI,
                                        AFI->getLocalStackSize());
    // ...
  }

  if (HasFP)
    emitFramePointerSetup(AfterGPRSavesI, DL, FixedObject);

  // ...
  if (EmitAsyncCFI)
    emitCalleeSavedGPRLocations(AfterGPRSavesI);

  // Alignment is required for the parent frame, not the funclet.
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo.hasStackRealignment(MF);
  const int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding))
    emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding);

  StackOffset PPRCalleeSavesSize =
      StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
  StackOffset ZPRCalleeSavesSize =
      StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
  StackOffset SVECalleeSavesSize = PPRCalleeSavesSize + ZPRCalleeSavesSize;
  // ...

  std::optional<MachineBasicBlock::iterator> ZPRCalleeSavesBegin,
      ZPRCalleeSavesEnd, PPRCalleeSavesBegin, PPRCalleeSavesEnd;

  // ...
  MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI;
  if (!FPAfterSVECalleeSaves) {
    // Process the SVE callee saves to find the extents of the PPR and ZPR
    // save areas.
    PPRCalleeSavesBegin = AfterGPRSavesI;
    if (PPRCalleeSavesSize) {
      // ...
      assert(isPartOfPPRCalleeSaves(AfterSVESavesI) &&
             "Unexpected instruction");
      while (isPartOfPPRCalleeSaves(AfterSVESavesI) &&
             AfterSVESavesI != MBB.getFirstTerminator())
        ++AfterSVESavesI;
    }
    PPRCalleeSavesEnd = ZPRCalleeSavesBegin = AfterSVESavesI;
    if (ZPRCalleeSavesSize) {
      // ...
      assert(isPartOfZPRCalleeSaves(AfterSVESavesI) &&
             "Unexpected instruction");
      while (isPartOfZPRCalleeSaves(AfterSVESavesI) &&
             AfterSVESavesI != MBB.getFirstTerminator())
        ++AfterSVESavesI;
    }
    ZPRCalleeSavesEnd = AfterSVESavesI;
  }

  // ...
  if (EmitAsyncCFI)
    emitCalleeSavedSVELocations(AfterSVESavesI);

  if (AFI->hasSplitSVEObjects()) {
    assert(!FPAfterSVECalleeSaves &&
           "Cannot use FPAfterSVECalleeSaves with aarch64-split-sve-objects");
    assert(!AFL.canUseRedZone(MF) &&
           "Cannot use redzone with aarch64-split-sve-objects");
    // ...
    assert(!NeedsWinCFI &&
           "WinCFI with aarch64-split-sve-objects is not supported");

    // Allocate the PPR callee saves.
    // ...
    allocateStackSpace(*PPRCalleeSavesBegin, 0, PPRCalleeSavesSize,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects() || ZPRCalleeSavesSize ||
                           ZPRLocalsSize || PPRLocalsSize);
    CFAOffset += PPRCalleeSavesSize;

    // Allocate the PPR locals together with the ZPR callee saves.
    assert(PPRCalleeSavesEnd == ZPRCalleeSavesBegin &&
           "Expected ZPR callee saves after PPR locals");
    allocateStackSpace(*PPRCalleeSavesEnd, RealignmentPadding,
                       PPRLocalsSize + ZPRCalleeSavesSize,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects() || ZPRLocalsSize);
    CFAOffset += PPRLocalsSize + ZPRCalleeSavesSize;

    // Allocate the ZPR locals (plus the fixed-size locals).
    allocateStackSpace(*ZPRCalleeSavesEnd, RealignmentPadding,
                       /* ... */,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects());
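// Layout note (editorial): with aarch64-split-sve-objects the prologue
// allocates in three stages: first the PPR callee saves, then the PPR locals
// together with the ZPR callee saves, then the ZPR locals plus the fixed-size
// locals, updating CFAOffset after each stage so asynchronous unwind info
// stays correct at every instruction boundary.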
  } else {
    // ...
    if (!FPAfterSVECalleeSaves)
      allocateStackSpace(AfterGPRSavesI, 0, SVECalleeSavesSize,
                         EmitAsyncCFI && !HasFP, CFAOffset,
                         MFI.hasVarSizedObjects() || LocalsSize);
    CFAOffset += SVECalleeSavesSize;

    // ...
    assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
           "Cannot use redzone with stack realignment");
    if (!AFL.canUseRedZone(MF)) {
      // ...
      StackOffset SVELocalsSize = PPRLocalsSize + ZPRLocalsSize;
      allocateStackSpace(AfterSVESavesI, RealignmentPadding,
                         /* ... */,
                         EmitAsyncCFI && !HasFP, CFAOffset,
                         MFI.hasVarSizedObjects());
    }
  }

  // ... (funclet entry handling for asynchronous EH personalities)
  MBB.addLiveIn(AArch64::X1);

  // ...
  if (EmitCFI && !EmitAsyncCFI) {
    // ...
    if (HasFP) {
      emitDefineCFAWithFP(AfterSVESavesI, FixedObject);
    } else {
      StackOffset TotalSize =
          AFL.getSVEStackSize(MF) +
          StackOffset::getFixed((int64_t)MFI.getStackSize());
      // ...
    }
    emitCalleeSavedGPRLocations(AfterSVESavesI);
    emitCalleeSavedSVELocations(AfterSVESavesI);
  }
}

void AArch64PrologueEmitter::emitShadowCallStackPrologue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // Shadow call stack prolog: str x30, [x18], #8
  // ...
  MBB.addLiveIn(AArch64::X18);

  // ...
  // Emit a CFI instruction that causes 8 to be subtracted from the value of
  // x18 when unwinding past this frame.
  static const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      18, // register
      2,  // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
      static_cast<char>(-8) & 0x7f, // addend (sleb128)
  };
  // ...
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
}
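// Byte-by-byte (editorial): DW_CFA_val_expression names register 18 and a
// 2-byte DWARF expression; DW_OP_breg18 with SLEB128 addend -8 (encoded as
// static_cast<char>(-8) & 0x7f == 0x78) tells the unwinder that x18's
// previous value equals its current value minus 8, undoing the
// `str x30, [x18], #8` push onto the shadow call stack.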
void AArch64PrologueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  case SwiftAsyncFramePointerMode::DeploymentBased:
    if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
      // ...
    }
    // ...
  // ...
  }
}

void AArch64PrologueEmitter::emitEmptyStackFramePrologue(
    int64_t NumBytes, MachineBasicBlock::iterator PrologueBeginI,
    const DebugLoc &DL) const {
  assert(!HasFP && "unexpected function without stack frame but with FP");
  assert(!AFI->hasSVEStackSize() &&
         "unexpected function without stack frame but with SVE objects");
  // ...
  AFI->setLocalStackSize(NumBytes);
  // ...
  if (AFL.canUseRedZone(MF)) {
    AFI->setHasRedZone(true);
    ++NumRedZoneFunctions;
  } else {
    // ...
    MCSymbol *FrameLabel = MF.getContext().createTempSymbol();
    // ...
        .buildDefCFAOffset(NumBytes, FrameLabel);
  }
}

void AArch64PrologueEmitter::emitFramePointerSetup(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
    unsigned FixedObject) {
  int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();
  if (CombineSPBump)
    FPOffset += AFI->getLocalStackSize();

  if (AFI->hasSwiftAsyncContext()) {
    // ...
    const auto &Attrs = MF.getFunction().getAttributes();
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
    // ...
  }

  // ...
  if (EmitAsyncCFI)
    emitDefineCFAWithFP(MBBI, FixedObject);
}

void AArch64PrologueEmitter::emitDefineCFAWithFP(
    MachineBasicBlock::iterator MBBI, unsigned FixedObject) const {
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  // ...
  CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
      .buildDefCFA(FramePtr, FixedObject - OffsetToFirstCalleeSaveFromFP);
}
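// Worked example (assumed values): with FixedObject = 16, a frame-record
// offset of 16 into a 64-byte callee-save area gives
// OffsetToFirstCalleeSaveFromFP = 16 - 64 = -48, so the directive defines
// CFA = FP + (16 - (-48)) = FP + 64: the CFA sits above both the callee-save
// area and the fixed object, independent of later SP adjustments.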
void AArch64PrologueEmitter::emitWindowsStackProbe(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t NumBytes,
    int64_t RealignmentPadding) const {
  if (AFI->getSVECalleeSavedStackSize())
    report_fatal_error("SVE callee saves not yet supported with stack probing");

  // Find an available register to spill the value of X15 to, if X15 is being
  // used already for nest.
  unsigned X15Scratch = AArch64::NoRegister;
  if (any_of(MBB.liveins(),
             [this](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
               return RegInfo.isSuperOrSubRegisterEq(AArch64::X15,
                                                     LiveIn.PhysReg);
             })) {
    X15Scratch = AFL.findScratchNonCalleeSaveRegister(&MBB, true);
    assert(X15Scratch != AArch64::NoRegister &&
           (X15Scratch < AArch64::X15 || X15Scratch > AArch64::X17));
    // ...
    LiveRegs.removeReg(AArch64::X15); // ignore X15 since we restore it below
    // ...
  }

  uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
  // ...
  if (NumBytes >= (1 << 28))
    report_fatal_error(/* ... */ "unwinding purposes");

  uint32_t LowNumWords = NumWords & 0xFFFF;
  // MOVZ the low half-word of NumWords into X15.
  // ...
  if ((NumWords & 0xFFFF0000) != 0) {
    // MOVK the high half-word.
    // ...
        .addImm((NumWords & 0xFFFF0000) >> 16)
    // ...
  }

  // ...
  const char *ChkStk = Subtarget.getChkStkName();
  switch (MF.getTarget().getCodeModel()) {
  // ...
  }

  // ...
  if (RealignmentPadding > 0) {
    if (RealignmentPadding >= 4096) {
      // Materialize the padding in a register first.
      // ...
          .addImm(RealignmentPadding)
      // ...
    } else {
      // ADD X15, SP, #RealignmentPadding.
      // ...
          .addImm(RealignmentPadding)
      // ...
    }

    uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
    // ...
    AFI->setStackRealigned(true);
  }
  if (X15Scratch != AArch64::NoRegister) {
    // ... restore X15 from the scratch register ...
  }
}
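// Protocol note (editorial): the Windows AArch64 __chkstk contract passes the
// allocation size in 16-byte units in x15, hence
// NumWords = (NumBytes + RealignmentPadding) >> 4, materialized with a MOVZ
// of the low half-word plus a MOVK of the high half-word when needed; the
// helper then probes each page before SP is finally decremented. If x15 is
// already live, it is parked in a scratch register first and restored
// afterwards.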
void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
    MachineBasicBlock::iterator MBBI) {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();
    if (MFI.hasScalableStackID(FrameIdx))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t Offset = MFI.getObjectOffset(FrameIdx) - AFL.getOffsetOfLocalArea();
    CFIBuilder.buildOffset(Info.getReg(), Offset);
  }
}

void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
    MachineBasicBlock::iterator MBBI) {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...

  std::optional<int64_t> IncomingVGOffsetFromDefCFA;
  if (AFL.requiresSaveVG(MF)) {
    auto IncomingVG = *find_if(
        reverse(CSI), [](auto &Info) { return Info.getReg() == AArch64::VG; });
    IncomingVGOffsetFromDefCFA = MFI.getObjectOffset(IncomingVG.getFrameIdx()) -
                                 AFL.getOffsetOfLocalArea();
  }

  StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
  for (const auto &Info : CSI) {
    int FI = Info.getFrameIdx();
    if (!MFI.hasScalableStackID(FI))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    MCRegister Reg = Info.getReg();
    // ...
    StackOffset Offset; // ... computed from the object offset ...
    if (AFI->hasSplitSVEObjects() &&
        /* ... */)
      /* ... */;

    CFIBuilder.insertCFIInst(
        createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA));
  }
}
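// CFI note (editorial): SVE save locations are scalable, so plain
// .cfi_offset directives cannot describe them. createCFAOffset instead builds
// a DWARF expression in terms of VG (the vector granule); when the incoming
// VG was itself saved (requiresSaveVG), IncomingVGOffsetFromDefCFA lets the
// expression use the value spilled at that slot rather than the live VG
// register, which may already reflect a streaming-mode change.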
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

void AArch64EpilogueEmitter::emitEpilogue() {
  // ...
  SEHEpilogueStartI = MBB.end();
  // ...
  if (MBB.end() != EpilogueEndI) {
    DL = EpilogueEndI->getDebugLoc();
    // ...
  }

  // ...
  int64_t ArgumentStackToRestore = AFL.getArgumentStackToRestore(MF, MBB);
  bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                              MF.getFunction().isVarArg());
  // ...
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);

  // ...
  auto FirstHomogenousEpilogI = MBB.getFirstTerminator();
  if (FirstHomogenousEpilogI != MBB.begin()) {
    auto HomogeneousEpilog = std::prev(FirstHomogenousEpilogI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      FirstHomogenousEpilogI = HomogeneousEpilog;
  }
  // ...
  assert(AfterCSRPopSize == 0);
  // ...

  bool FPAfterSVECalleeSaves =
      Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();

  bool CombineSPBump = shouldCombineCSRLocalStackBump(NumBytes);
  // Assume we can't combine the last pop with the sp restore.
  bool CombineAfterCSRBump = false;
  if (FPAfterSVECalleeSaves) {
    AfterCSRPopSize += FixedObject;
  } else if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      // ...
    } else {
      // ...
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }
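// Mirror of the prologue (editorial): if the final callee-save pop sits at
// offset 0, it is converted into a post-indexed load that also restores the
// save-area bytes; otherwise those bytes are deferred to a separate add after
// the pops (AfterCSRPopSize, with CombineAfterCSRBump recording the deferral
// for the CFI bookkeeping below).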
  // Move past the restores of the callee-saved registers.
  // ...
  while (FirstGPRRestoreI != Begin) {
    --FirstGPRRestoreI;
    if (/* ... not a frame-destroy GPR restore ... */) {
      // ...
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*FirstGPRRestoreI,
                                        AFI->getLocalStackSize());
  }

  if (NeedsWinCFI) {
    // ...
    BuildMI(MBB, FirstGPRRestoreI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);
    SEHEpilogueStartI = FirstGPRRestoreI;
    --SEHEpilogueStartI;
  }

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);

  // ...
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!AFI->hasSplitSVEObjects()) {
    // Process the SVE callee saves to determine what space needs to be
    // deallocated.
    StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
    MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI,
                                RestoreEnd = FirstGPRRestoreI;
    int64_t ZPRCalleeSavedSize = AFI->getZPRCalleeSavedStackSize();
    int64_t PPRCalleeSavedSize = AFI->getPPRCalleeSavedStackSize();
    int64_t SVECalleeSavedSize = ZPRCalleeSavedSize + PPRCalleeSavedSize;

    if (SVECalleeSavedSize) {
      if (FPAfterSVECalleeSaves)
        RestoreEnd = MBB.getFirstTerminator();

      RestoreBegin = std::prev(RestoreEnd);
      while (RestoreBegin != MBB.begin() &&
             isPartOfSVECalleeSaves(std::prev(RestoreBegin)))
        --RestoreBegin;
      // ...
      assert(isPartOfSVECalleeSaves(RestoreBegin) &&
             "Unexpected instruction");

      StackOffset CalleeSavedSizeAsOffset =
          StackOffset::getScalable(SVECalleeSavedSize);
      DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
      DeallocateAfter = CalleeSavedSizeAsOffset;
    }

    // ...
    if (FPAfterSVECalleeSaves) {
      // ...
      if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
        // ...
      }
      // ...
    } else if (SVEStackSize) {
      int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize();
      // ...
      Register BaseForSVEDealloc =
          (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
                                                                : AArch64::SP;
      if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) {
        Register CalleeSaveBase = AArch64::FP;
        if (int64_t CalleeSaveBaseOffset =
                AFI->getCalleeSaveBaseToFrameRecordOffset()) {
          // ...
          CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister(
              &AArch64::GPR64RegClass);
          // ...
        }
        // ...
      } else if (BaseForSVEDealloc == AArch64::SP) {
        if (SVECalleeSavedSize) {
          // Deallocate the non-SVE locals first.
          emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                          /* ... */
                          StackOffset::getFixed(NumBytes + PrologueSaveSize));
          // ...
        }
        // ...
      }
      // ...
    }
    // ...
    emitCalleeSavedSVERestores(RestoreEnd);
  } else if (AFI->hasSplitSVEObjects() && SVEStackSize) {
    // ...
    assert(!AFI->isStackRealigned() && !MFI.hasVarSizedObjects() &&
           "unexpected stack realignment or variable sized objects with split "
           "SVE stack objects");
    // ...
    auto ZPRCalleeSavedSize =
        StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
    auto PPRCalleeSavedSize =
        StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
    StackOffset PPRLocalsSize = PPRStackSize - PPRCalleeSavedSize;
    StackOffset ZPRLocalsSize = ZPRStackSize - ZPRCalleeSavedSize;

    MachineBasicBlock::iterator PPRRestoreBegin = FirstGPRRestoreI,
                                PPRRestoreEnd = FirstGPRRestoreI;
    if (PPRCalleeSavedSize) {
      PPRRestoreBegin = std::prev(PPRRestoreEnd);
      while (PPRRestoreBegin != MBB.begin() &&
             isPartOfPPRCalleeSaves(std::prev(PPRRestoreBegin)))
        --PPRRestoreBegin;
    }

    MachineBasicBlock::iterator ZPRRestoreBegin = PPRRestoreBegin,
                                ZPRRestoreEnd = PPRRestoreBegin;
    if (ZPRCalleeSavedSize) {
      ZPRRestoreBegin = std::prev(ZPRRestoreEnd);
      while (ZPRRestoreBegin != MBB.begin() &&
             isPartOfZPRCalleeSaves(std::prev(ZPRRestoreBegin)))
        --ZPRRestoreBegin;
    }
    // ...
    if (PPRCalleeSavedSize || ZPRCalleeSavedSize) {
      // ...
      CFAOffset -= NonSVELocals;
    }
    // ...
    if (ZPRLocalsSize) {
      // ...
      CFAOffset -= ZPRLocalsSize;
    }
    // ...
    if (PPRLocalsSize || ZPRCalleeSavedSize) {
      assert(PPRRestoreBegin == ZPRRestoreEnd &&
             "Expected PPR restores after ZPR");
      emitFrameOffset(MBB, PPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
                      PPRLocalsSize + ZPRCalleeSavedSize, TII,
                      /* ... */);
      // ...
      CFAOffset -= PPRLocalsSize + ZPRCalleeSavedSize;
    }
    if (PPRCalleeSavedSize) {
      // ...
    }

    // ...
    emitCalleeSavedSVERestores(ZPRRestoreEnd);
  }

  bool RedZone = AFL.canUseRedZone(MF);
  // If this was a redzone leaf function, we don't need to restore the
  // stack pointer (but we may need to pop stack args for fastcc).
  if (RedZone && AfterCSRPopSize == 0)
    return;

  // ...
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;

  // ...
  emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
                  /* ... StackRestoreBytes ... */);
  // ...
  if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
    return;

  // Restore the original stack pointer.
  // ...
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::FP,
                    /* ... */);
  } else if (NumBytes) {
    // ...
  }

  // ...
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    // ...
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    /* ... */);
  }
}

bool AArch64EpilogueEmitter::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  // ...
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    // ...
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
}
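// Rationale (editorial): when the epilogue ends in an MTE tag store (the
// STG/STZG family re-tags memory as the frame is released), the combined SP
// bump is disabled: the SP adjustment is better folded into the tag-store
// instructions themselves, which support post-indexed writeback, e.g.
// `st2g sp, [sp], #32`.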
void AArch64EpilogueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  // ...
  }
}

void AArch64EpilogueEmitter::emitShadowCallStackEpilogue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // Shadow call stack epilog: ldr x30, [x18, #-8]!
  // ...
  if (AFI->needsAsyncDwarfUnwindInfo(MF))
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
        .buildRestore(AArch64::X18);
}

void AArch64EpilogueEmitter::emitCalleeSavedRestores(
    MachineBasicBlock::iterator MBBI, bool SVE) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
  for (const auto &Info : CSI) {
    if (SVE != MFI.hasScalableStackID(Info.getFrameIdx()))
      continue;

    MCRegister Reg = Info.getReg();
    // ...
    CFIBuilder.buildRestore(Info.getReg());
  }
}

void AArch64EpilogueEmitter::finalizeEpilogue() const {
  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackEpilogue(MBB.getFirstTerminator(), DL);
    // ...
  }
  // ...
  emitCalleeSavedGPRRestores(MBB.getFirstTerminator());
  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::PAUTH_EPILOGUE))
          .setMIFlag(MachineInstr::FrameDestroy);
    }
    // ...
  }
  // ...
  if (HasWinCFI) {
    BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
        .setMIFlag(MachineInstr::FrameDestroy);
    if (!MF.hasWinCFI())
      MF.setHasWinCFI(true);
  }
  // ...
  if (/* ... */)
    MBB.erase(SEHEpilogueStartI);
}