#define DEBUG_TYPE "frame-info"

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
// Whether VG must be saved via a call (e.g. to a get-current-VG helper)
// because the function changes streaming mode.
bool AArch64PrologueEpilogueCommon::requiresGetVGCall() const {
  return AFI->hasStreamingModeChanges() &&
         /* ... */;
}

bool AArch64PrologueEpilogueCommon::isVGInstruction(
    MachineBasicBlock::iterator MBBI, const TargetLowering &TLI) const {
  unsigned Opc = MBBI->getOpcode();
  if (Opc == AArch64::CNTD_XPiI)
    // ...

  if (Opc == AArch64::BL)
    // (checks the callee against the get-current-VG libcall; see matchLibcall)
    // ...

  return Opc == TargetOpcode::COPY;
}

// Returns true if I is part of the ZPR callee-save sequence.
static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  // ...
  case AArch64::LD1B_2Z_IMM:
  case AArch64::ST1B_2Z_IMM:
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI:
  case AArch64::CPY_ZPzI_B:
  case AArch64::CMPNE_PPzZI_B:
  case AArch64::PTRUE_C_B:
  case AArch64::PTRUE_B:
    // ...
  case AArch64::SEH_SavePReg:
  case AArch64::SEH_SaveZReg:
    // ...
  }
}

// Returns true if I is part of the PPR callee-save sequence.
static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  // ...
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI:
    // ...
  }
}
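// The declarations accompanying this file also list isPartOfSVECalleeSaves.
// A minimal sketch, assuming it is simply the union of the two predicates
// above (the body is not shown in this excerpt):
static bool isPartOfSVECalleeSaves(MachineBasicBlock::iterator I) {
  return isPartOfZPRCalleeSaves(I) || isPartOfPPRCalleeSaves(I);
}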
// Convert a callee-save-related SP adjustment into a pre/post-indexed
// variant of the first/last callee-save instruction.
MachineBasicBlock::iterator
AArch64PrologueEpilogueCommon::convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int CSStackSizeInc,
    bool EmitCFI, MachineInstr::MIFlag FrameFlag, int CFAOffset) const {
  // If the function contains streaming mode changes, move past the
  // instructions that compute VG before the spills.
  if (AFL.requiresSaveVG(MF)) {
    auto &TLI = *Subtarget.getTargetLowering();
    while (isVGInstruction(MBBI, TLI))
      ++MBBI;
  }

  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }

  TypeSize Scale = TypeSize::getFixed(1), Width = TypeSize::getFixed(0);
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  assert(Success && "unknown load/store opcode");

  // If the first store isn't right where we want SP then we can't fold the
  // update in so create a normal arithmetic instruction instead.
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset * (int64_t)Scale.getFixedValue() ||
      CSStackSizeInc > MaxOffset * (int64_t)Scale.getFixedValue()) {
    // ...
    return std::prev(MBBI);
  }

  // Get rid of the SEH code associated with the old instruction.
  auto SEH = std::next(MBBI);
  if (AArch64InstrInfo::isSEHInstruction(*SEH))
    SEH->eraseFromParent();

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);
  // ...
  AFL.insertSEH(*MIB, *TII, FrameFlag);
  // ...
  return std::prev(MBB.erase(MBBI));
}
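// For illustration (not from this file): given CSStackSizeInc = -16, the
// conversion above folds a separate SP update into the first callee-save
// store, e.g.
//
//   sub sp, sp, #16
//   stp x29, x30, [sp]          // STPXi, offset 0
//
// becomes the single pre-indexed "stp x29, x30, [sp, #-16]!" (STPXpre), and
// the matching epilogue load becomes "ldp x29, x30, [sp], #16" (LDPXpost).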
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
void AArch64PrologueEpilogueCommon::fixupCalleeSaveRestoreStackOffset(
    MachineInstr &MI, uint64_t LocalStackSize) const {
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return;

  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is the immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  // On Windows, the corresponding SEH opcode must be fixed up as well.
  if (NeedsWinCFI) {
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}
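// For illustration (not from this file): these opcodes use scaled
// immediates, so growing the local area by 48 bytes turns
//
//   stp x29, x30, [sp, #0]      // Scale = 8
//
// into imm 0 + 48/8 = 6, which prints as "stp x29, x30, [sp, #48]".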
bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  if (AFL.homogeneousPrologEpilog(MF))
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // ...
  if (AFL.needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
      MF.getFunction().hasOptSize())
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores.
  if (StackBumpBytes >= 512 ||
      AFL.windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  // ...
  if (AFL.canUseRedZone(MF))
    return false;

  // When there is an SVE area on the stack, always allocate the
  // callee-saves and spills/locals separately.
  if (AFI->hasSVEStackSize())
    return false;

  return true;
}
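// For illustration (not from this file): when this predicate holds, the
// prologue merges the two SP adjustments, so
//
//   sub sp, sp, #16             // callee-save area
//   stp x29, x30, [sp]
//   sub sp, sp, #32             // locals
//
// becomes a single bump with fixed-up save offsets:
//
//   sub sp, sp, #48
//   stp x29, x30, [sp, #32]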
AArch64PrologueEmitter::AArch64PrologueEmitter(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               const AArch64FrameLowering &AFL)
    : AArch64PrologueEpilogueCommon(MF, MBB, AFL) {
  // ...
  EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
  // ...
  collectBlockLiveins();
}
void AArch64PrologueEmitter::collectBlockLiveins() {
  // Collect the live registers from the end of MBB up to the start of the
  // existing frame-setup instructions.
  PrologueEndI = MBB.begin();
  while (PrologueEndI != MBB.end() &&
         PrologueEndI->getFlag(MachineInstr::FrameSetup))
    ++PrologueEndI;

  if (PrologueEndI != MBB.end()) {
    getLivePhysRegsUpTo(*PrologueEndI, *TRI, LiveRegs);
    // ...
  }
}
void AArch64PrologueEmitter::verifyPrologueClobbers() const {
  if (PrologueEndI == MBB.end())
    return; // Nothing to verify.

  for (MachineInstr &MI :
       make_range(MBB.instr_begin(), PrologueEndI->getIterator())) {
    for (auto &Op : MI.operands())
      if (Op.isReg() && Op.isDef())
        assert(!LiveRegs.contains(Op.getReg()) &&
               "live register clobbered by inserted prologue instructions");
  }
}
void AArch64PrologueEmitter::determineLocalsStackSize(
    uint64_t StackSize, uint64_t PrologueSaveSize) {
  AFI->setLocalStackSize(StackSize - PrologueSaveSize);
  // ...
}

// Conservative upper bound in bytes for a StackOffset with a scalable
// component: each scalable byte occupies at most 16 real bytes.
static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
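// For illustration (not from this file): SVE vectors are 128 to 2048 bits,
// so vscale is at most 16 and one scalable byte can expand to at most 16
// real bytes. E.g.
//   upperBound(StackOffset::getScalable(48) + StackOffset::getFixed(32))
// yields 48 * 16 + 32 = 800 bytes.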
void AArch64PrologueEmitter::allocateStackSpace(
    MachineBasicBlock::iterator MBBI, int64_t RealignmentPadding,
    StackOffset AllocSize, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) {
  // ...
  const int64_t MaxAlign = MFI.getMaxAlign().value();
  const uint64_t AndMask = ~(MaxAlign - 1);

  // Without inline stack probing, a single SP decrement (plus optional
  // realignment) is enough.
  if (!Subtarget.getTargetLowering()->hasInlineStackProbe(MF)) {
    Register TargetReg = RealignmentPadding
                             ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                             : AArch64::SP;
    // ...
    if (RealignmentPadding) {
      // AND SP, TargetReg, AndMask
      // ...
    }
    return;
  }

  // Fixed-length allocation: if the stack does not need realignment and the
  // size has no scalable component, a single probed-allocation pseudo
  // suffices.
  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    Register ScratchReg = AFL.findScratchNonCalleeSaveRegister(&MBB);
    assert(ScratchReg != AArch64::NoRegister);
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::PROBED_STACKALLOC))
        .addDef(ScratchReg)
        .addImm(AllocSize.getFixed())
        .addImm(InitialOffset.getFixed())
        .addImm(InitialOffset.getScalable());
    // The fixed allocation may leave unprobed bytes at the top of the stack.
    if (FollowupAllocs) {
      // STR XZR, [SP]
      // ...
    }
    return;
  }

  // Variable-length allocation no larger than the probe size: decrement SP
  // right away.
  int64_t ProbeSize = AFI->getStackProbeSize();
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    // ...
    if (RealignmentPadding) {
      // ...
      AFI->setStackRealigned(true);
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // STR XZR, [SP]
      // ...
    }
    return;
  }

  // Otherwise emit a variable-length probing loop.
  Register TargetReg = AFL.findScratchNonCalleeSaveRegister(&MBB);
  assert(TargetReg != AArch64::NoRegister);
  // ...
  if (RealignmentPadding) {
    // ...
  }
  // ...
  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
        .buildDefCFARegister(AArch64::SP);
  }
  if (RealignmentPadding)
    AFI->setStackRealigned(true);
}
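// For illustration (not from this file): with the default 4 KiB probe size a
// probed allocation touches the stack at least once per page, so the
// kernel's guard page cannot be skipped over. At most
// AArch64::StackProbeMaxUnprobedStack (1024) bytes may remain unprobed at an
// ABI boundary, which is why a trailing "str xzr, [sp]" probe is emitted
// when a follow-up allocation exists or that bound may be exceeded.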
void AArch64PrologueEmitter::emitPrologue() {
  // ...
  // Most functions don't use a red zone; assume false and set it to true
  // later if one is used.
  AFI->setHasRedZone(false);
  // ...
  if (AFI->getArgumentStackToRestore())
    /* ... */;

  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      // ...
    }
  }

  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackPrologue(PrologueBeginI, DL);
    // ...
  }

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(PrologueBeginI, DL);

  // Set tagged base pointer to the requested stack slot. Ideally it should
  // match SP value after prologue.
  if (std::optional<int> TBPI = AFI->getTaggedBasePointerIndex())
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());

  // ...
  if (!AFI->hasStackFrame() && !AFL.windowsRequiresStackProbe(MF, NumBytes))
    return emitEmptyStackFramePrologue(NumBytes, PrologueBeginI, DL);

  const Function &F = MF.getFunction();
  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  // ...
  bool FPAfterSVECalleeSaves =
      Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();
  if (FPAfterSVECalleeSaves && AFI->hasStackHazardSlotIndex())
    reportFatalUsageError(
        /* SME hazard padding is not supported on Windows */);

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  determineLocalsStackSize(NumBytes, PrologueSaveSize);

  MachineBasicBlock::iterator FirstGPRSaveI = PrologueBeginI;
  if (FPAfterSVECalleeSaves) {
    // If SVE saves come first, immediately allocate space for the fixed
    // objects and the SVE callee saves.
    // ...
    allocateStackSpace(PrologueBeginI, 0, SaveSize, false, StackOffset{},
                       /*FollowupAllocs=*/true);
    NumBytes -= FixedObject;

    // Now allocate space for the GPR callee saves.
    // ...
    FirstGPRSaveI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI);
    NumBytes -= AFI->getCalleeSavedStackSize();
  } else if (CombineSPBump) {
    assert(!AFL.getSVEStackSize(MF) && "Cannot combine SP bump with SVE");
    // ...
  } else if (AFL.homogeneousPrologEpilog(MF)) {
    // Stack has already been adjusted.
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    FirstGPRSaveI = convertCalleeSaveRestoreToSPPrePostIncDec(
        PrologueBeginI, DL, -PrologueSaveSize, EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  auto &TLI = *Subtarget.getTargetLowering();
  MachineBasicBlock::iterator AfterGPRSavesI = FirstGPRSaveI;
  while (AfterGPRSavesI != EndI /* && ... */) {
    if (CombineSPBump /* && ... */)
      fixupCalleeSaveRestoreStackOffset(*AfterGPRSavesI,
                                        AFI->getLocalStackSize());
    ++AfterGPRSavesI;
  }
705 emitCalleeSavedGPRLocations(AfterGPRSavesI);
708 const bool NeedsRealignment =
710 const int64_t RealignmentPadding =
711 (NeedsRealignment &&
MFI.getMaxAlign() >
Align(16))
712 ?
MFI.getMaxAlign().value() - 16
715 if (
AFL.windowsRequiresStackProbe(
MF, NumBytes + RealignmentPadding))
716 emitWindowsStackProbe(AfterGPRSavesI,
DL, NumBytes, RealignmentPadding);
  StackOffset PPRCalleeSavesSize =
      StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
  StackOffset ZPRCalleeSavesSize =
      StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
  StackOffset SVECalleeSavesSize = PPRCalleeSavesSize + ZPRCalleeSavesSize;
  StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize;
  StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize;

  std::optional<MachineBasicBlock::iterator> ZPRCalleeSavesBegin,
      ZPRCalleeSavesEnd, PPRCalleeSavesBegin, PPRCalleeSavesEnd;

  StackOffset CFAOffset =
      StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes);
  MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI;
  if (!FPAfterSVECalleeSaves) {
    // Find the starts/ends of the PPR and ZPR callee-save areas.
    PPRCalleeSavesBegin = AfterGPRSavesI;
    if (PPRCalleeSavesSize) {
      // ...
      assert(isPartOfPPRCalleeSaves(*PPRCalleeSavesBegin) &&
             "Unexpected instruction");
      while (isPartOfPPRCalleeSaves(AfterSVESavesI) &&
             AfterSVESavesI != MBB.getFirstTerminator())
        ++AfterSVESavesI;
    }
    PPRCalleeSavesEnd = ZPRCalleeSavesBegin = AfterSVESavesI;
    if (ZPRCalleeSavesSize) {
      // ...
      assert(isPartOfZPRCalleeSaves(*ZPRCalleeSavesBegin) &&
             "Unexpected instruction");
      while (isPartOfZPRCalleeSaves(AfterSVESavesI) &&
             AfterSVESavesI != MBB.getFirstTerminator())
        ++AfterSVESavesI;
    }
    ZPRCalleeSavesEnd = AfterSVESavesI;
  }

  if (EmitAsyncCFI)
    emitCalleeSavedSVELocations(AfterSVESavesI);
  if (AFI->hasSplitSVEObjects()) {
    assert(!FPAfterSVECalleeSaves &&
           "Cannot use FPAfterSVECalleeSaves with aarch64-split-sve-objects");
    assert(!AFL.canUseRedZone(MF) &&
           "Cannot use redzone with aarch64-split-sve-objects");
    // ...
    assert(!NeedsWinCFI &&
           "WinCFI with aarch64-split-sve-objects is not supported");

    // Allocate the PPR callee saves.
    allocateStackSpace(*PPRCalleeSavesBegin, 0, PPRCalleeSavesSize,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects() || ZPRCalleeSavesSize ||
                           ZPRLocalsSize || PPRLocalsSize);
    CFAOffset += PPRCalleeSavesSize;

    // Allocate the PPR locals together with the ZPR callee saves.
    assert(PPRCalleeSavesEnd == ZPRCalleeSavesBegin &&
           "Expected ZPR callee saves after PPR locals");
    allocateStackSpace(*PPRCalleeSavesEnd, RealignmentPadding,
                       PPRLocalsSize + ZPRCalleeSavesSize,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects() || ZPRLocalsSize);
    CFAOffset += PPRLocalsSize + ZPRCalleeSavesSize;

    // Allocate the ZPR locals (and remaining fixed-size locals).
    allocateStackSpace(*ZPRCalleeSavesEnd, RealignmentPadding,
                       ZPRLocalsSize + StackOffset::getFixed(NumBytes),
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects());
  } else {
    // Allocate space for the callee saves (if any).
    StackOffset LocalsSize =
        PPRLocalsSize + ZPRLocalsSize + StackOffset::getFixed(NumBytes);
    if (!FPAfterSVECalleeSaves)
      allocateStackSpace(AfterGPRSavesI, 0, SVECalleeSavesSize,
                         EmitAsyncCFI && !HasFP, CFAOffset,
                         MFI.hasVarSizedObjects() || LocalsSize);
    CFAOffset += SVECalleeSavesSize;

    // Allocate space for the rest of the frame including SVE locals. Align
    // the stack as necessary.
    assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
           "Cannot use redzone with stack realignment");
    if (!AFL.canUseRedZone(MF)) {
      // ...
      StackOffset SVELocalsSize = PPRLocalsSize + ZPRLocalsSize;
      allocateStackSpace(AfterSVESavesI, RealignmentPadding,
                         SVELocalsSize + StackOffset::getFixed(NumBytes),
                         EmitAsyncCFI && !HasFP, CFAOffset,
                         MFI.hasVarSizedObjects());
    }
  }
  // SEH funclets are passed the frame pointer in X1, so mark it live-in.
  if (IsFunclet /* && ... */) {
    // ...
    MBB.addLiveIn(AArch64::X1);
  }

  if (EmitCFI && !EmitAsyncCFI) {
    // Emit the remaining CFI at the end of the prologue.
    if (HasFP) {
      emitDefineCFAWithFP(AfterSVESavesI, FixedObject);
    } else {
      StackOffset TotalSize =
          AFL.getSVEStackSize(MF) +
          StackOffset::getFixed((int64_t)MFI.getStackSize());
      // ...
    }
    emitCalleeSavedGPRLocations(AfterSVESavesI);
    emitCalleeSavedSVELocations(AfterSVESavesI);
  }
}
void AArch64PrologueEmitter::emitShadowCallStackPrologue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // Shadow call stack prolog: str x30, [x18], #8
  // ...

  // This instruction also makes x18 live-in to the entry block.
  MBB.addLiveIn(AArch64::X18);
  // ...
  // Emit a CFI instruction that causes 8 to be subtracted from the value of
  // x18 when unwinding past this frame.
  static const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      18, // register
      2,  // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
      static_cast<char>(-8) & 0x7f, // addend (sleb128)
  };
  CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
}
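// For illustration (not from this file): DW_CFA_val_expression here encodes
// "previous x18 = current x18 - 8". The operands are the DWARF register
// number (18), the expression length (2), DW_OP_breg18, and the addend -8 in
// SLEB128: -8 over 7 bits is 0b1111000 = 0x78, which is exactly what
// static_cast<char>(-8) & 0x7f produces.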
void AArch64PrologueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  case SwiftAsyncFramePointerMode::DeploymentBased:
    if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
      // ...
    }
    // ...
  // ...
  }
}
void AArch64PrologueEmitter::emitEmptyStackFramePrologue(
    int64_t NumBytes, MachineBasicBlock::iterator PrologueBeginI,
    const DebugLoc &DL) const {
  assert(!HasFP && "unexpected function without stack frame but with FP");
  assert(!AFI->hasSVEStackSize() &&
         "unexpected function without stack frame but with SVE objects");
  // All of the stack allocation is for locals.
  AFI->setLocalStackSize(NumBytes);
  // ...
  // REDZONE: If the stack size is less than 128 bytes, we don't need
  // to actually allocate.
  if (AFL.canUseRedZone(MF)) {
    AFI->setHasRedZone(true);
    ++NumRedZoneFunctions;
  } else {
    // ...
    if (EmitCFI) {
      // Label used to tie together the PROLOG_LABEL and the MachineMoves.
      MCSymbol *FrameLabel = MF.getContext().createTempSymbol();
      // Encode the stack size of the leaf function.
      CFIInstBuilder(MBB, PrologueBeginI, MachineInstr::FrameSetup)
          .buildDefCFAOffset(NumBytes, FrameLabel);
    }
  }
  // ...
}
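// For illustration (not from this file): on targets that permit it (e.g.
// Darwin), the AArch64 red zone is the 128 bytes below SP that a leaf
// function may use without moving SP, so a small leaf frame skips the
// "sub sp, sp, #N" entirely; NumRedZoneFunctions (the STATISTIC above)
// counts how often this happens.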
void AArch64PrologueEmitter::emitFramePointerSetup(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
    unsigned FixedObject) {
  int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();
  if (CombineSPBump)
    FPOffset += AFI->getLocalStackSize();

  if (AFI->hasSwiftAsyncContext()) {
    // Before updating the live FP, ensure there is a valid (or null)
    // asynchronous context in its slot just before FP in the frame record.
    const auto &Attrs = MF.getFunction().getAttributes();
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
    // ...
  }

  // ...
  if (EmitAsyncCFI)
    emitDefineCFAWithFP(MBBI, FixedObject);
}

void AArch64PrologueEmitter::emitDefineCFAWithFP(
    MachineBasicBlock::iterator MBBI, unsigned FixedObject) const {
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
      .buildDefCFA(FramePtr, FixedObject - OffsetToFirstCalleeSaveFromFP);
}
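// For illustration (not from this file): with a 96-byte callee-save area
// whose frame record sits at its base (getCalleeSaveBaseToFrameRecordOffset()
// == 0) and FixedObject == 0, OffsetToFirstCalleeSaveFromFP is -96, so this
// emits ".cfi_def_cfa w29, 96": the CFA sits 96 bytes above the frame
// pointer.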
void AArch64PrologueEmitter::emitWindowsStackProbe(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t NumBytes,
    int64_t RealignmentPadding) const {
  if (AFI->getSVECalleeSavedStackSize())
    report_fatal_error(
        "SVE callee saves not yet supported with stack probing");

  // The probe helper clobbers x15, so if x15 is live-in, move its value to a
  // scratch register first.
  unsigned X15Scratch = AArch64::NoRegister;
  if (llvm::any_of(MBB.liveins(),
                   [this](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
                     return RegInfo.isSuperOrSubRegisterEq(AArch64::X15,
                                                           LiveIn.PhysReg);
                   })) {
    X15Scratch = AFL.findScratchNonCalleeSaveRegister(&MBB, true);
    assert(X15Scratch != AArch64::NoRegister &&
           (X15Scratch < AArch64::X15 || X15Scratch > AArch64::X17));
    LiveRegs.removeReg(AArch64::X15); // ignore X15 since we restore it
    // ...
  }

  // The probe helper takes the allocation size in 16-byte units in x15.
  uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
  // ...
  if (NumBytes >= (1 << 28))
    report_fatal_error("Stack size cannot exceed 256MB for stack "
                       "unwinding purposes");

  uint32_t LowNumWords = NumWords & 0xFFFF;
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
      .addImm(LowNumWords)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(MachineInstr::FrameSetup);
  // ...
  if ((NumWords & 0xFFFF0000) != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
        .addReg(AArch64::X15)
        .addImm((NumWords & 0xFFFF0000) >> 16) // High half
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
        .setMIFlag(MachineInstr::FrameSetup);
    // ...
  }
  // ...

  const char *ChkStk = Subtarget.getChkStkName();
  switch (MF.getTarget().getCodeModel()) {
  // ...
  }
  // ...

  if (RealignmentPadding > 0) {
    if (RealignmentPadding >= 4096) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm))
          .addReg(AArch64::X16, RegState::Define)
          .addImm(RealignmentPadding)
          .setMIFlags(MachineInstr::FrameSetup);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXrx64), AArch64::X15)
          .addReg(AArch64::SP)
          .addReg(AArch64::X16, RegState::Kill)
          .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 0))
          .setMIFlag(MachineInstr::FrameSetup);
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15)
          .addReg(AArch64::SP)
          .addImm(RealignmentPadding)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
        .addReg(AArch64::X15, RegState::Kill)
        .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64));
    AFI->setStackRealigned(true);
    // ...
  }
  if (X15Scratch != AArch64::NoRegister) {
    // Restore x15 from the scratch register.
    // ...
  }
}
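// For illustration (not from this file): the Windows AArch64 __chkstk
// contract is that x15 holds the allocation size in 16-byte units (hence
// "NumWords = bytes >> 4"); the helper probes each page below SP and
// preserves registers other than x16/x17, which is why x15 must be freed up
// when it is live-in and why the scratch register must avoid x15-x17.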
void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
    MachineBasicBlock::iterator MBBI) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();
    if (MFI.hasScalableStackID(FrameIdx))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t Offset =
        MFI.getObjectOffset(FrameIdx) - AFL.getOffsetOfLocalArea();
    CFIBuilder.buildOffset(Info.getReg(), Offset);
  }
}
void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
    MachineBasicBlock::iterator MBBI) const {
  // Add callee-saved registers to the move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  // ...
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);

  std::optional<int64_t> IncomingVGOffsetFromDefCFA;
  if (AFL.requiresSaveVG(MF)) {
    auto IncomingVG = *find_if(reverse(CSI), [](auto &Info) {
      return Info.getReg() == AArch64::VG;
    });
    IncomingVGOffsetFromDefCFA =
        MFI.getObjectOffset(IncomingVG.getFrameIdx()) -
        AFL.getOffsetOfLocalArea();
  }

  StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
  for (const auto &Info : CSI) {
    int FI = Info.getFrameIdx();
    if (!MFI.hasScalableStackID(FI))
      continue;

    // Not all unwinders may know about SVE registers, so assume the lowest
    // common denominator.
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    MCRegister Reg = Info.getReg();
    // ...

    if (AFI->hasSplitSVEObjects() /* && ... */) {
      // ...
    }

    CFIBuilder.insertCFIInst(
        createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA));
  }
}
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

void AArch64EpilogueEmitter::emitEpilogue() {
  // ...
  SEHEpilogueStartI = MBB.end();
  MachineBasicBlock::iterator EpilogueEndI = MBB.getLastNonDebugInstr();
  if (MBB.end() != EpilogueEndI) {
    DL = EpilogueEndI->getDebugLoc();
    // ...
  }
  int64_t ArgumentStackToRestore = AFL.getArgumentStackToRestore(MF, MBB);
  bool IsWin64 = Subtarget.isCallingConvWin64(
      MF.getFunction().getCallingConv(), MF.getFunction().isVarArg());
  // ...
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);

  // Look for a HOM_Epilog pseudo before the terminator (created by the
  // homogeneous prolog/epilog pass, if enabled).
  auto FirstHomogenousEpilogI = MBB.getFirstTerminator();
  if (FirstHomogenousEpilogI != MBB.begin()) {
    auto HomogeneousEpilog = std::prev(FirstHomogenousEpilogI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      FirstHomogenousEpilogI = HomogeneousEpilog;
  }
  // ...
  assert(AfterCSRPopSize == 0);
  // ...
  bool FPAfterSVECalleeSaves =
      Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();

  bool CombineSPBump = shouldCombineCSRLocalStackBump(NumBytes);
  // Assume we can't combine the last pop with the sp restore.
  bool CombineAfterCSRBump = false;
  if (FPAfterSVECalleeSaves) {
    AfterCSRPopSize += FixedObject;
  } else if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is feasible only if the
    // last ldp's offset is 0.
    const MachineOperand &OffsetOp =
        Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0 and the AfterCSR pop is not actually trying to
    // allocate more stack for arguments (in space that an untimely interrupt
    // may clobber), convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      // ...
    } else {
      // Otherwise transfer the size to be restored from the adjustment
      // *before* the CSR pops to the adjustment *after* the CSR pops.
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }
  // Move past the restores of the callee-saved registers.
  MachineBasicBlock::iterator FirstGPRRestoreI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (FirstGPRRestoreI != Begin) {
    --FirstGPRRestoreI;
    if (!FirstGPRRestoreI->getFlag(MachineInstr::FrameDestroy) /* || ... */) {
      ++FirstGPRRestoreI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*FirstGPRRestoreI,
                                        AFI->getLocalStackSize());
  }

  if (NeedsWinCFI) {
    // ...
    BuildMI(MBB, FirstGPRRestoreI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);
    SEHEpilogueStartI = FirstGPRRestoreI;
    --SEHEpilogueStartI;
  }

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!AFI->hasSplitSVEObjects()) {
    // Process the SVE callee-saves to determine what space needs to be
    // deallocated.
    StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
    MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI,
                                RestoreEnd = FirstGPRRestoreI;
    int64_t ZPRCalleeSavedSize = AFI->getZPRCalleeSavedStackSize();
    int64_t PPRCalleeSavedSize = AFI->getPPRCalleeSavedStackSize();
    int64_t SVECalleeSavedSize = ZPRCalleeSavedSize + PPRCalleeSavedSize;

    if (SVECalleeSavedSize) {
      if (FPAfterSVECalleeSaves)
        RestoreEnd = MBB.getFirstTerminator();

      RestoreBegin = std::prev(RestoreEnd);
      while (RestoreBegin != MBB.begin() &&
             isPartOfSVECalleeSaves(std::prev(RestoreBegin)))
        --RestoreBegin;

      assert(isPartOfSVECalleeSaves(RestoreBegin) &&
             isPartOfSVECalleeSaves(std::prev(RestoreEnd)) &&
             "Unexpected instruction");

      StackOffset CalleeSavedSizeAsOffset =
          StackOffset::getScalable(SVECalleeSavedSize);
      DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
      DeallocateAfter = CalleeSavedSizeAsOffset;
    }

    // Deallocate the SVE area.
    if (FPAfterSVECalleeSaves) {
      // ...
      if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
        // ...
      }
      // ...
    } else if (SVEStackSize) {
      int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize();
      // With stack realignment or variable-sized objects, the SVE callee
      // saves must be reached via the frame pointer, since the distance from
      // SP is unknown.
      Register BaseForSVEDealloc =
          (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
                                                                : AArch64::SP;
      if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) {
        Register CalleeSaveBase = AArch64::FP;
        if (int64_t CalleeSaveBaseOffset =
                AFI->getCalleeSaveBaseToFrameRecordOffset()) {
          // Compute the callee-save base in a temporary register first, to
          // avoid briefly deallocating the SVE callee saves.
          CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister(
              &AArch64::GPR64RegClass);
          // ...
        }
        // ...
      } else if (BaseForSVEDealloc == AArch64::SP) {
        if (SVECalleeSavedSize) {
          // Deallocate the non-SVE locals first.
          emitFrameOffset(
              MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
              StackOffset::getFixed(NumBytes), TII,
              MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI,
              EmitCFI && !HasFP,
              SVEStackSize + StackOffset::getFixed(
                                 NumBytes + PrologueSaveSize));
          NumBytes = 0;
        }
        // ...
      }

      // ...
      emitCalleeSavedSVERestores(RestoreEnd);
    }
  } else if (AFI->hasSplitSVEObjects() && SVEStackSize) {
    // ...
    assert(!AFI->isStackRealigned() && !MFI.hasVarSizedObjects() &&
           "unexpected stack realignment or variable sized objects with split "
           "SVE stack objects");

    auto ZPRCalleeSavedSize =
        StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
    auto PPRCalleeSavedSize =
        StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
    StackOffset PPRLocalsSize = PPRStackSize - PPRCalleeSavedSize;
    StackOffset ZPRLocalsSize = ZPRStackSize - ZPRCalleeSavedSize;

    MachineBasicBlock::iterator PPRRestoreBegin = FirstGPRRestoreI,
                                PPRRestoreEnd = FirstGPRRestoreI;
    if (PPRCalleeSavedSize) {
      PPRRestoreBegin = std::prev(PPRRestoreEnd);
      while (PPRRestoreBegin != MBB.begin() &&
             isPartOfPPRCalleeSaves(std::prev(PPRRestoreBegin)))
        --PPRRestoreBegin;
    }

    MachineBasicBlock::iterator ZPRRestoreBegin = PPRRestoreBegin,
                                ZPRRestoreEnd = PPRRestoreBegin;
    if (ZPRCalleeSavedSize) {
      ZPRRestoreBegin = std::prev(ZPRRestoreEnd);
      while (ZPRRestoreBegin != MBB.begin() &&
             isPartOfZPRCalleeSaves(std::prev(ZPRRestoreBegin)))
        --ZPRRestoreBegin;
    }

    // Track the CFA offset as the stack is unwound (declaration elided).
    // ...
    if (PPRCalleeSavedSize || ZPRCalleeSavedSize) {
      // Deallocate the non-SVE locals first.
      StackOffset NonSVELocals = StackOffset::getFixed(NumBytes);
      // ...
      CFAOffset -= NonSVELocals;
    }

    if (ZPRLocalsSize) {
      // ...
      CFAOffset -= ZPRLocalsSize;
    }

    if (PPRLocalsSize || ZPRCalleeSavedSize) {
      assert(PPRRestoreBegin == ZPRRestoreEnd &&
             "Expected PPR restores after ZPR");
      emitFrameOffset(MBB, PPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
                      PPRLocalsSize + ZPRCalleeSavedSize, TII,
                      MachineInstr::FrameDestroy /* , ... */);
      CFAOffset -= PPRLocalsSize + ZPRCalleeSavedSize;
    }

    if (PPRCalleeSavedSize) {
      // ...
    }

    // ...
    emitCalleeSavedSVERestores(ZPRRestoreEnd);
  }
  bool RedZone = AFL.canUseRedZone(MF);
  // If this was a redzone leaf function, we don't need to restore the
  // stack pointer (but we may need to pop stack args for fastcc).
  if (RedZone && AfterCSRPopSize == 0)
    return;

  // Pop the local variables off the stack. If there are no callee-saved
  // registers, it means we are actually positioned at the terminator and can
  // combine the stack increment for the locals and for the callee-saved
  // registers into a single increment.
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;

  // ...
  emitFrameOffset(
      MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
      StackOffset::getFixed(StackRestoreBytes), TII,
      MachineInstr::FrameDestroy /* , ... */);

  // If we were able to combine the local stack pop with the argument pop,
  // then we're done.
  if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
    return;

  // Restore the original stack pointer. With variable-sized objects or
  // stack realignment, SP must be restored from FP.
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::FP,
                    /* ... */);
  } else if (NumBytes) {
    // ...
  }

  // ...
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    emitFrameOffset(
        MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
        StackOffset::getFixed(AfterCSRPopSize), TII,
        MachineInstr::FrameDestroy /* , ... */);
  }
}
bool AArch64EpilogueEmitter::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  if (!AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
          StackBumpBytes))
    return false;
  // ...
  // Disable the combined SP bump if the last instruction is an MTE tag
  // store: it is almost always better to merge the SP adjustment into those.
  MachineBasicBlock::iterator LastI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    if (!LastI->getFlag(MachineInstr::FrameDestroy))
      break;
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
}
void AArch64EpilogueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  // ...
  }
}
void AArch64EpilogueEmitter::emitShadowCallStackEpilogue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // Shadow call stack epilog: ldr x30, [x18, #-8]!
  // ...

  if (AFI->needsAsyncDwarfUnwindInfo(MF))
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
        .buildRestore(AArch64::X18);
}
void AArch64EpilogueEmitter::emitCalleeSavedRestores(
    MachineBasicBlock::iterator MBBI, bool SVE) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
  for (const auto &Info : CSI) {
    if (SVE != MFI.hasScalableStackID(Info.getFrameIdx()))
      continue;

    MCRegister Reg = Info.getReg();
    // ...

    CFIBuilder.buildRestore(Info.getReg());
  }
}
void AArch64EpilogueEmitter::finalizeEpilogue() const {
  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackEpilogue(MBB.getFirstTerminator(), DL);
    // ...
  }
  // ...
  emitCalleeSavedGPRRestores(MBB.getFirstTerminator());
  if (AFI->shouldSignReturnAddress(MF)) {
    // If pac-ret+leaf is in effect, the PAUTH_EPILOGUE is emitted elsewhere.
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::PAUTH_EPILOGUE))
          .setMIFlag(MachineInstr::FrameDestroy);
    }
    // ...
  }
  if (HasWinCFI) {
    BuildMI(MBB, MBB.getFirstTerminator(), DL,
            TII->get(AArch64::SEH_EpilogEnd))
        .setMIFlag(MachineInstr::FrameDestroy);
    if (!MF.hasWinCFI())
      MF.setHasWinCFI(true);
  }
  if (NeedsWinCFI) {
    // If no SEH opcodes were actually needed, remove the SEH_EpilogStart
    // emitted earlier.
    if (!HasWinCFI)
      MBB.erase(SEHEpilogueStartI);
  }
}