1//===-- RISCVFrameLowering.cpp - RISC-V Frame Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVFrameLowering.h"
16#include "RISCVSubtarget.h"
26#include "llvm/MC/MCDwarf.h"
27#include "llvm/Support/LEB128.h"
28
29#include <algorithm>
30
31#define DEBUG_TYPE "riscv-frame"
32
33using namespace llvm;
34
35static Align getABIStackAlignment(RISCVABI::ABI ABI) {
36 if (ABI == RISCVABI::ABI_ILP32E)
37 return Align(4);
38 if (ABI == RISCVABI::ABI_LP64E)
39 return Align(8);
40 return Align(16);
41}
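// For example, the default ILP32*/LP64* ABIs keep the stack 16-byte aligned,
// while the reduced-register ILP32E/LP64E ABIs only guarantee 4 or 8 bytes
// respectively.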
42
43RISCVFrameLowering::RISCVFrameLowering(const RISCVSubtarget &STI)
44 : TargetFrameLowering(
45 StackGrowsDown, getABIStackAlignment(STI.getTargetABI()),
46 /*LocalAreaOffset=*/0,
47 /*TransientStackAlignment=*/getABIStackAlignment(STI.getTargetABI())),
48 STI(STI) {}
49
50// The register used to hold the frame pointer.
51static constexpr MCPhysReg FPReg = RISCV::X8;
52
53// The register used to hold the stack pointer.
54static constexpr MCPhysReg SPReg = RISCV::X2;
55
56// The register used to hold the return address.
57static constexpr MCPhysReg RAReg = RISCV::X1;
58
59// List of CSRs that are given a fixed location by save/restore libcalls or
60// Zcmp/Xqccmp Push/Pop. The order in this table indicates the order the
61// registers are saved on the stack. Zcmp and Xqccmp use the reverse order of
62// save/restore on the stack, but this is handled when offsets are calculated.
63static const MCPhysReg FixedCSRFIMap[] = {
64 /*ra*/ RAReg, /*s0*/ FPReg, /*s1*/ RISCV::X9,
65 /*s2*/ RISCV::X18, /*s3*/ RISCV::X19, /*s4*/ RISCV::X20,
66 /*s5*/ RISCV::X21, /*s6*/ RISCV::X22, /*s7*/ RISCV::X23,
67 /*s8*/ RISCV::X24, /*s9*/ RISCV::X25, /*s10*/ RISCV::X26,
68 /*s11*/ RISCV::X27};
69
70// The number of stack bytes allocated by `QC.C.MIENTER(.NEST)` and popped by
71// `QC.C.MILEAVERET`.
72static constexpr uint64_t QCIInterruptPushAmount = 96;
73
74static const std::pair<MCPhysReg, int8_t> FixedCSRFIQCIInterruptMap[] = {
75 /* -1 is a gap for mepc/mnepc */
76 {/*fp*/ FPReg, -2},
77 /* -3 is a gap for qc.mcause */
78 {/*ra*/ RAReg, -4},
79 /* -5 is reserved */
80 {/*t0*/ RISCV::X5, -6},
81 {/*t1*/ RISCV::X6, -7},
82 {/*t2*/ RISCV::X7, -8},
83 {/*a0*/ RISCV::X10, -9},
84 {/*a1*/ RISCV::X11, -10},
85 {/*a2*/ RISCV::X12, -11},
86 {/*a3*/ RISCV::X13, -12},
87 {/*a4*/ RISCV::X14, -13},
88 {/*a5*/ RISCV::X15, -14},
89 {/*a6*/ RISCV::X16, -15},
90 {/*a7*/ RISCV::X17, -16},
91 {/*t3*/ RISCV::X28, -17},
92 {/*t4*/ RISCV::X29, -18},
93 {/*t5*/ RISCV::X30, -19},
94 {/*t6*/ RISCV::X31, -20},
95 /* -21, -22, -23, -24 are reserved */
96};
97
98/// Returns true if DWARF CFI instructions ("frame moves") should be emitted.
99static bool needsDwarfCFI(const MachineFunction &MF) {
100 return MF.needsFrameMoves();
101}
102
103// For now we use x3, a.k.a. gp, as the pointer to the shadow call stack.
104// Users should not use x3 in their asm.
105static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
106 MachineBasicBlock::iterator MI,
107 const DebugLoc &DL) {
108 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
109 // We check Zimop instead of (Zimop || Zcmop) to determine whether the HW
110 // shadow stack is available, even though sspush/sspopchk both have a
111 // compressed form: if only Zcmop were available, we would need to reserve
112 // X5, because c.sspopchk only accepts X5 and we currently do not support
113 // using X5 as the return address register.
114 // However, we can still aggressively use c.sspush x1 if Zcmop is available.
115 bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
116 STI.hasStdExtZimop();
117 bool HasSWShadowStack =
118 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
119 if (!HasHWShadowStack && !HasSWShadowStack)
120 return;
121
122 const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();
123
124 // Do not save RA to the SCS if it's not saved to the regular stack,
125 // i.e. RA is not at risk of being overwritten.
126 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
127 if (llvm::none_of(
128 CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
129 return;
130
131 const RISCVInstrInfo *TII = STI.getInstrInfo();
132 if (HasHWShadowStack) {
133 if (STI.hasStdExtZcmop()) {
134 static_assert(RAReg == RISCV::X1, "C.SSPUSH only accepts X1");
135 BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_C_SSPUSH));
136 } else {
137 BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPUSH)).addReg(RAReg);
138 }
139 return;
140 }
141
142 Register SCSPReg = RISCVABI::getSCSPReg();
143
144 bool IsRV64 = STI.is64Bit();
145 int64_t SlotSize = STI.getXLen() / 8;
146 // Store return address to shadow call stack
147 // addi gp, gp, [4|8]
148 // s[w|d] ra, -[4|8](gp)
149 BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
150 .addReg(SCSPReg, RegState::Define)
151 .addReg(SCSPReg)
152 .addImm(SlotSize)
153 .setMIFlag(MachineInstr::FrameSetup);
154 BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
155 .addReg(RAReg)
156 .addReg(SCSPReg)
157 .addImm(-SlotSize)
158 .setMIFlag(MachineInstr::FrameSetup);
159
160 if (!needsDwarfCFI(MF))
161 return;
162
163 // Emit a CFI instruction that causes SlotSize to be subtracted from the value
164 // of the shadow stack pointer when unwinding past this frame.
165 char DwarfSCSReg = TRI->getDwarfRegNum(SCSPReg, /*IsEH*/ true);
166 assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X3).");
167
168 char Offset = static_cast<char>(-SlotSize) & 0x7f;
169 const char CFIInst[] = {
170 dwarf::DW_CFA_val_expression,
171 DwarfSCSReg, // register
172 2, // length
173 static_cast<char>(unsigned(dwarf::DW_OP_breg0 + DwarfSCSReg)),
174 Offset, // addend (sleb128)
175 };
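// Worked example: on RV64 with gp (x3) as the SCS pointer, SlotSize is 8, the
// DWARF register number for gp is 3, so the escape encodes DW_CFA_val_expression,
// 3, 2, DW_OP_breg3, 0x78 (SLEB128 for -8), i.e. "unwound gp = current gp - 8".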
176
177 CFIInstBuilder(MBB, MI, MachineInstr::FrameSetup)
178 .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
179}
180
181static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
182 MachineBasicBlock::iterator MI,
183 const DebugLoc &DL) {
184 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
185 bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
186 STI.hasStdExtZimop();
187 bool HasSWShadowStack =
188 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
189 if (!HasHWShadowStack && !HasSWShadowStack)
190 return;
191
192 // See emitSCSPrologue() above.
193 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
194 if (llvm::none_of(
195 CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
196 return;
197
198 const RISCVInstrInfo *TII = STI.getInstrInfo();
199 if (HasHWShadowStack) {
200 BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoMOP_SSPOPCHK)).addReg(RAReg);
201 return;
202 }
203
204 Register SCSPReg = RISCVABI::getSCSPReg();
205
206 bool IsRV64 = STI.is64Bit();
207 int64_t SlotSize = STI.getXLen() / 8;
208 // Load return address from shadow call stack
209 // l[w|d] ra, -[4|8](gp)
210 // addi gp, gp, -[4|8]
211 BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
212 .addReg(RAReg, RegState::Define)
213 .addReg(SCSPReg)
214 .addImm(-SlotSize)
215 .setMIFlag(MachineInstr::FrameDestroy);
216 BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
217 .addReg(SCSPReg, RegState::Define)
218 .addReg(SCSPReg)
219 .addImm(-SlotSize)
220 .setMIFlag(MachineInstr::FrameDestroy);
221 if (needsDwarfCFI(MF)) {
222 // Restore the SCS pointer
223 CFIInstBuilder(MBB, MI, MachineInstr::FrameDestroy).buildRestore(SCSPReg);
224 }
225}
226
227// Insert an instruction to swap sf.mscratchcsw with sp.
228static void emitSiFiveCLICStackSwap(MachineFunction &MF, MachineBasicBlock &MBB,
229 MachineBasicBlock::iterator MBBI,
230 const DebugLoc &DL) {
231 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
232
233 if (!RVFI->isSiFiveStackSwapInterrupt(MF))
234 return;
235
236 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
237 const RISCVInstrInfo *TII = STI.getInstrInfo();
238
239 assert(STI.hasVendorXSfmclic() && "Stack Swapping Requires XSfmclic");
240
241 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
242 .addReg(SPReg, RegState::Define)
243 .addImm(RISCVSysReg::sf_mscratchcsw)
244 .addReg(SPReg, RegState::Kill)
245 .setMIFlag(MachineInstr::FrameSetup);
246
247 // FIXME: CFI Information for this swap.
248}
249
250static void
251createSiFivePreemptibleInterruptFrameEntries(MachineFunction &MF,
252 RISCVMachineFunctionInfo &RVFI) {
253 if (!RVFI.isSiFivePreemptibleInterrupt(MF))
254 return;
255
256 const TargetRegisterClass &RC = RISCV::GPRRegClass;
257 const TargetRegisterInfo &TRI =
258 *MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
259 MachineFrameInfo &MFI = MF.getFrameInfo();
260
261 // Create two frame objects for spilling X8 and X9, which will be done in
262 // `emitSiFiveCLICPreemptibleSaves`. This is in addition to any other stack
263 // objects we might have for X8 and X9, as they might be saved twice.
264 for (int I = 0; I < 2; ++I) {
265 int FI = MFI.CreateStackObject(TRI.getSpillSize(RC), TRI.getSpillAlign(RC),
266 true);
268 }
269}
270
271static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF,
272 MachineBasicBlock &MBB,
273 MachineBasicBlock::iterator MBBI,
274 const DebugLoc &DL) {
275 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
276
277 if (!RVFI->isSiFivePreemptibleInterrupt(MF))
278 return;
279
280 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
281 const RISCVInstrInfo *TII = STI.getInstrInfo();
282
283 // FIXME: CFI Information here is nonexistent/wrong.
284
285 // X8 and X9 might be stored into the stack twice, initially into the
286 // `interruptCSRFrameIndex` here, and then maybe again into their CSI frame
287 // index.
288 //
289 // This is done instead of telling the register allocator that we need two
290 // VRegs to store the value of `mcause` and `mepc` through the instruction,
291 // which affects other passes.
292 TII->storeRegToStackSlot(MBB, MBBI, RISCV::X8, /* IsKill=*/true,
293 RVFI->getInterruptCSRFrameIndex(0),
294 &RISCV::GPRRegClass, STI.getRegisterInfo(),
295 Register(), MachineInstr::FrameSetup);
296 TII->storeRegToStackSlot(MBB, MBBI, RISCV::X9, /* IsKill=*/true,
297 RVFI->getInterruptCSRFrameIndex(1),
298 &RISCV::GPRRegClass, STI.getRegisterInfo(),
299 Register(), MachineInstr::FrameSetup);
300
301 // Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
302 // used in the function, then they will appear in `getUnmanagedCSI` and will
303 // be saved again.
304 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
305 .addReg(RISCV::X8, RegState::Define)
306 .addImm(RISCVSysReg::mcause)
307 .addReg(RISCV::X0)
308 .setMIFlag(MachineInstr::FrameSetup);
309 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRS))
310 .addReg(RISCV::X9, RegState::Define)
311 .addImm(RISCVSysReg::mepc)
312 .addReg(RISCV::X0)
313 .setMIFlag(MachineInstr::FrameSetup);
314
315 // Enable interrupts.
316 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRSI))
317 .addReg(RISCV::X0, RegState::Define)
318 .addImm(RISCVSysReg::mstatus)
319 .addImm(8)
320 .setMIFlag(MachineInstr::FrameSetup);
321}
322
323static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF,
324 MachineBasicBlock &MBB,
325 MachineBasicBlock::iterator MBBI,
326 const DebugLoc &DL) {
327 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
328
329 if (!RVFI->isSiFivePreemptibleInterrupt(MF))
330 return;
331
332 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
333 const RISCVInstrInfo *TII = STI.getInstrInfo();
334
335 // FIXME: CFI Information here is nonexistent/wrong.
336
337 // Disable interrupts.
338 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRCI))
339 .addReg(RISCV::X0, RegState::Define)
340 .addImm(RISCVSysReg::mstatus)
341 .addImm(8)
342 .setMIFlag(MachineInstr::FrameDestroy);
343
344 // Restore `mepc` from x9 (s1), and `mcause` from x8 (s0). If either were used
345 // in the function, they have already been restored once, so now have the
346 // value stored in `emitSiFiveCLICPreemptibleSaves`.
347 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
348 .addReg(RISCV::X0, RegState::Define)
349 .addImm(RISCVSysReg::mepc)
350 .addReg(RISCV::X9, RegState::Kill)
351 .setMIFlag(MachineInstr::FrameDestroy);
352 BuildMI(MBB, MBBI, DL, TII->get(RISCV::CSRRW))
353 .addReg(RISCV::X0, RegState::Define)
354 .addImm(RISCVSysReg::mcause)
355 .addReg(RISCV::X8, RegState::Kill)
356 .setMIFlag(MachineInstr::FrameDestroy);
357
358 // X8 and X9 need to be restored to their values on function entry, which we
359 // saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
360 TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X9,
361 RVFI->getInterruptCSRFrameIndex(1),
362 &RISCV::GPRRegClass, STI.getRegisterInfo(),
363 Register(), MachineInstr::FrameDestroy);
364 TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X8,
365 RVFI->getInterruptCSRFrameIndex(0),
366 &RISCV::GPRRegClass, STI.getRegisterInfo(),
367 Register(), MachineInstr::FrameDestroy);
368}
369
370// Get the ID of the libcall used for spilling and restoring callee saved
371// registers. The ID is representative of the number of registers saved or
372// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
373// single register.
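// For example, a function whose highest libcall-saved register is s1 (X9) gets
// ID 2, which selects the __riscv_save_2/__riscv_restore_2 libcalls below.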
374static int getLibCallID(const MachineFunction &MF,
375 const std::vector<CalleeSavedInfo> &CSI) {
376 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
377
378 if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
379 return -1;
380
381 MCRegister MaxReg;
382 for (auto &CS : CSI)
383 // assignCalleeSavedSpillSlots assigns negative frame indexes to
384 // registers which can be saved by libcall.
385 if (CS.getFrameIdx() < 0)
386 MaxReg = std::max(MaxReg.id(), CS.getReg().id());
387
388 if (!MaxReg)
389 return -1;
390
391 switch (MaxReg.id()) {
392 default:
393 llvm_unreachable("Something has gone wrong!");
394 // clang-format off
395 case /*s11*/ RISCV::X27: return 12;
396 case /*s10*/ RISCV::X26: return 11;
397 case /*s9*/ RISCV::X25: return 10;
398 case /*s8*/ RISCV::X24: return 9;
399 case /*s7*/ RISCV::X23: return 8;
400 case /*s6*/ RISCV::X22: return 7;
401 case /*s5*/ RISCV::X21: return 6;
402 case /*s4*/ RISCV::X20: return 5;
403 case /*s3*/ RISCV::X19: return 4;
404 case /*s2*/ RISCV::X18: return 3;
405 case /*s1*/ RISCV::X9: return 2;
406 case /*s0*/ FPReg: return 1;
407 case /*ra*/ RAReg: return 0;
408 // clang-format on
409 }
410}
411
412// Get the name of the libcall used for spilling callee saved registers.
413// If this function will not use save/restore libcalls, then return a nullptr.
414static const char *
416 const std::vector<CalleeSavedInfo> &CSI) {
417 static const char *const SpillLibCalls[] = {
418 "__riscv_save_0",
419 "__riscv_save_1",
420 "__riscv_save_2",
421 "__riscv_save_3",
422 "__riscv_save_4",
423 "__riscv_save_5",
424 "__riscv_save_6",
425 "__riscv_save_7",
426 "__riscv_save_8",
427 "__riscv_save_9",
428 "__riscv_save_10",
429 "__riscv_save_11",
430 "__riscv_save_12"
431 };
432
433 int LibCallID = getLibCallID(MF, CSI);
434 if (LibCallID == -1)
435 return nullptr;
436 return SpillLibCalls[LibCallID];
437}
438
439// Get the name of the libcall used for restoring callee saved registers.
440// If this function will not use save/restore libcalls, then return a nullptr.
441static const char *
443 const std::vector<CalleeSavedInfo> &CSI) {
444 static const char *const RestoreLibCalls[] = {
445 "__riscv_restore_0",
446 "__riscv_restore_1",
447 "__riscv_restore_2",
448 "__riscv_restore_3",
449 "__riscv_restore_4",
450 "__riscv_restore_5",
451 "__riscv_restore_6",
452 "__riscv_restore_7",
453 "__riscv_restore_8",
454 "__riscv_restore_9",
455 "__riscv_restore_10",
456 "__riscv_restore_11",
457 "__riscv_restore_12"
458 };
459
460 int LibCallID = getLibCallID(MF, CSI);
461 if (LibCallID == -1)
462 return nullptr;
463 return RestoreLibCalls[LibCallID];
464}
465
466// Get the number of callee saved registers covered by Push/Pop for save/restore.
467static unsigned getNumPushPopRegs(const std::vector<CalleeSavedInfo> &CSI) {
468 unsigned NumPushPopRegs = 0;
469 for (auto &CS : CSI) {
470 auto *FII = llvm::find_if(FixedCSRFIMap,
471 [&](MCPhysReg P) { return P == CS.getReg(); });
472 if (FII != std::end(FixedCSRFIMap)) {
473 unsigned RegNum = std::distance(std::begin(FixedCSRFIMap), FII);
474 NumPushPopRegs = std::max(NumPushPopRegs, RegNum + 1);
475 }
476 }
477 assert(NumPushPopRegs != 12 && "x26 requires x27 to also be pushed");
478 return NumPushPopRegs;
479}
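// For example, if the highest register found in FixedCSRFIMap is s3 (X19),
// its index is 4, so the push/pop covers five registers: ra, s0, s1, s2, s3.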
480
481// Return true if the specified function should have a dedicated frame
482// pointer register. This is true if frame pointer elimination is
483// disabled, if it needs dynamic stack realignment, if the function has
484// variable sized allocas, or if the frame address is taken.
485bool RISCVFrameLowering::hasFPImpl(const MachineFunction &MF) const {
486 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
487
488 const MachineFrameInfo &MFI = MF.getFrameInfo();
489 return MF.getTarget().Options.DisableFramePointerElim(MF) ||
490 RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
491 MFI.isFrameAddressTaken();
492}
493
494bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
495 const MachineFrameInfo &MFI = MF.getFrameInfo();
496 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
497
498 // If we do not reserve stack space for outgoing arguments in prologue,
499 // we will adjust the stack pointer before call instruction. After the
500 // adjustment, we can not use SP to access the stack objects for the
501 // arguments. Instead, use BP to access these stack objects.
502 return (MFI.hasVarSizedObjects() ||
503 (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
504 MFI.getMaxCallFrameSize() != 0))) &&
505 TRI->hasStackRealignment(MF);
506}
507
508// Determines the size of the frame and maximum call frame size.
509void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
510 MachineFrameInfo &MFI = MF.getFrameInfo();
511 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
512
513 // Get the number of bytes to allocate from the FrameInfo.
514 uint64_t FrameSize = MFI.getStackSize();
515
516 // QCI Interrupts use at least 96 bytes of stack space
517 if (RVFI->useQCIInterrupt(MF))
518 FrameSize = std::max(FrameSize, QCIInterruptPushAmount);
519
520 // Get the alignment.
521 Align StackAlign = getStackAlign();
522
523 // Make sure the frame is aligned.
524 FrameSize = alignTo(FrameSize, StackAlign);
525
526 // Update frame info.
527 MFI.setStackSize(FrameSize);
528
529 // When using SP or BP to access stack objects, we may require extra padding
530 // to ensure the bottom of the RVV stack is correctly aligned within the main
531 // stack. We calculate this as the amount required to align the scalar local
532 // variable section up to the RVV alignment.
533 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
534 if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
535 int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
536 RVFI->getVarArgsSaveSize();
537 if (auto RVVPadding =
538 offsetToAlignment(ScalarLocalVarSize, RVFI->getRVVStackAlign()))
539 RVFI->setRVVPadding(RVVPadding);
540 }
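// For example, if the scalar local-variable section is 24 bytes deep and the
// RVV section needs 16-byte alignment, offsetToAlignment(24, 16) == 8, so
// 8 bytes of RVV padding are recorded.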
541}
542
543// Returns the stack size including RVV padding (when required), rounded back
544// up to the required stack alignment.
545uint64_t RISCVFrameLowering::getStackSizeWithRVVPadding(
546 const MachineFunction &MF) const {
547 const MachineFrameInfo &MFI = MF.getFrameInfo();
548 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
549 return alignTo(MFI.getStackSize() + RVFI->getRVVPadding(), getStackAlign());
550}
551
552static SmallVector<CalleeSavedInfo, 8>
553getUnmanagedCSI(const MachineFunction &MF,
554 const std::vector<CalleeSavedInfo> &CSI) {
555 const MachineFrameInfo &MFI = MF.getFrameInfo();
556 SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
557
558 for (auto &CS : CSI) {
559 int FI = CS.getFrameIdx();
560 if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::Default)
561 NonLibcallCSI.push_back(CS);
562 }
563
564 return NonLibcallCSI;
565}
566
567static SmallVector<CalleeSavedInfo, 8>
568getRVVCalleeSavedInfo(const MachineFunction &MF,
569 const std::vector<CalleeSavedInfo> &CSI) {
570 const MachineFrameInfo &MFI = MF.getFrameInfo();
571 SmallVector<CalleeSavedInfo, 8> RVVCSI;
572
573 for (auto &CS : CSI) {
574 int FI = CS.getFrameIdx();
575 if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector)
576 RVVCSI.push_back(CS);
577 }
578
579 return RVVCSI;
580}
581
582static SmallVector<CalleeSavedInfo, 8>
583getPushOrLibCallsSavedInfo(const MachineFunction &MF,
584 const std::vector<CalleeSavedInfo> &CSI) {
585 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
586
587 SmallVector<CalleeSavedInfo, 8> PushOrLibCallsCSI;
588 if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
589 return PushOrLibCallsCSI;
590
591 for (const auto &CS : CSI) {
592 if (RVFI->useQCIInterrupt(MF)) {
593 // Some registers are saved by both `QC.C.MIENTER(.NEST)` and
594 // `QC.CM.PUSH(FP)`. In these cases, prioritise the CFI info that points
595 // to the versions saved by `QC.C.MIENTER(.NEST)` which is what FP
596 // unwinding would use.
597 if (llvm::is_contained(llvm::make_first_range(FixedCSRFIQCIInterruptMap),
598 CS.getReg()))
599 continue;
600 }
601
602 if (llvm::is_contained(FixedCSRFIMap, CS.getReg()))
603 PushOrLibCallsCSI.push_back(CS);
604 }
605
606 return PushOrLibCallsCSI;
607}
608
609static SmallVector<CalleeSavedInfo, 8>
610getQCISavedInfo(const MachineFunction &MF,
611 const std::vector<CalleeSavedInfo> &CSI) {
612 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
613
614 SmallVector<CalleeSavedInfo, 8> QCIInterruptCSI;
615 if (!RVFI->useQCIInterrupt(MF))
616 return QCIInterruptCSI;
617
618 for (const auto &CS : CSI) {
619 if (llvm::is_contained(llvm::make_first_range(FixedCSRFIQCIInterruptMap),
620 CS.getReg()))
621 QCIInterruptCSI.push_back(CS);
622 }
623
624 return QCIInterruptCSI;
625}
626
627void RISCVFrameLowering::allocateAndProbeStackForRVV(
628 MachineFunction &MF, MachineBasicBlock &MBB,
629 MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t Amount,
630 MachineInstr::MIFlag Flag, bool EmitCFI, bool DynAllocation) const {
631 assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");
632
633 // Emit a variable-length allocation probing loop.
634
635 // Get VLEN in TargetReg
636 const RISCVInstrInfo *TII = STI.getInstrInfo();
637 Register TargetReg = RISCV::X6;
638 uint32_t NumOfVReg = Amount / RISCV::RVVBytesPerBlock;
639 BuildMI(MBB, MBBI, DL, TII->get(RISCV::PseudoReadVLENB), TargetReg)
640 .setMIFlag(Flag);
641 TII->mulImm(MF, MBB, MBBI, DL, TargetReg, NumOfVReg, Flag);
642
643 CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
644 if (EmitCFI) {
645 // Set the CFA register to TargetReg.
646 CFIBuilder.buildDefCFA(TargetReg, -Amount);
647 }
648
649 // It will be expanded to a probe loop in `inlineStackProbe`.
650 BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC_RVV))
651 .addReg(TargetReg);
652
653 if (EmitCFI) {
654 // Set the CFA register back to SP.
655 CFIBuilder.buildDefCFARegister(SPReg);
656 }
657
658 // SUB SP, SP, T1
659 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SUB), SPReg)
660 .addReg(SPReg)
661 .addReg(TargetReg)
662 .setMIFlag(Flag);
663
664 // If we have a dynamic allocation later we need to probe any residuals.
665 if (DynAllocation) {
666 BuildMI(MBB, MBBI, DL, TII->get(STI.is64Bit() ? RISCV::SD : RISCV::SW))
667 .addReg(RISCV::X0)
668 .addReg(SPReg)
669 .addImm(0)
670 .setMIFlag(Flag);
671 }
672}
673
674static void appendScalableVectorExpression(const TargetRegisterInfo &TRI,
675 SmallVectorImpl<char> &Expr,
676 int FixedOffset, int ScalableOffset,
677 llvm::raw_string_ostream &Comment) {
678 unsigned DwarfVLenB = TRI.getDwarfRegNum(RISCV::VLENB, true);
679 uint8_t Buffer[16];
680 if (FixedOffset) {
681 Expr.push_back(dwarf::DW_OP_consts);
682 Expr.append(Buffer, Buffer + encodeSLEB128(FixedOffset, Buffer));
683 Expr.push_back((uint8_t)dwarf::DW_OP_plus);
684 Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
685 }
686
687 Expr.push_back((uint8_t)dwarf::DW_OP_consts);
688 Expr.append(Buffer, Buffer + encodeSLEB128(ScalableOffset, Buffer));
689
690 Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
691 Expr.append(Buffer, Buffer + encodeULEB128(DwarfVLenB, Buffer));
692 Expr.push_back(0);
693
694 Expr.push_back((uint8_t)dwarf::DW_OP_mul);
695 Expr.push_back((uint8_t)dwarf::DW_OP_plus);
696
697 Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
698 << " * vlenb";
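// For instance, FixedOffset=16 and ScalableOffset=32 produce
// "DW_OP_consts 16, DW_OP_plus, DW_OP_consts 32, DW_OP_bregx vlenb 0,
// DW_OP_mul, DW_OP_plus" with the comment text " + 16 + 32 * vlenb".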
699}
700
701static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
702 Register Reg,
703 uint64_t FixedOffset,
704 uint64_t ScalableOffset) {
705 assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
706 SmallString<64> Expr;
707 std::string CommentBuffer;
708 llvm::raw_string_ostream Comment(CommentBuffer);
709 // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
710 unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
711 Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
712 Expr.push_back(0);
713 if (Reg == SPReg)
714 Comment << "sp";
715 else
716 Comment << printReg(Reg, &TRI);
717
718 appendScalableVectorExpression(TRI, Expr, FixedOffset, ScalableOffset,
719 Comment);
720
721 SmallString<64> DefCfaExpr;
722 uint8_t Buffer[16];
723 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
724 DefCfaExpr.append(Buffer, Buffer + encodeULEB128(Expr.size(), Buffer));
725 DefCfaExpr.append(Expr.str());
726
727 return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
728 Comment.str());
729}
730
731static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
732 Register Reg, uint64_t FixedOffset,
733 uint64_t ScalableOffset) {
734 assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
735 SmallString<64> Expr;
736 std::string CommentBuffer;
737 llvm::raw_string_ostream Comment(CommentBuffer);
738 Comment << printReg(Reg, &TRI) << " @ cfa";
739
740 // Build up the expression (FixedOffset + ScalableOffset * VLENB).
741 appendScalableVectorExpression(TRI, Expr, FixedOffset, ScalableOffset,
742 Comment);
743
744 SmallString<64> DefCfaExpr;
745 uint8_t Buffer[16];
746 unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
747 DefCfaExpr.push_back(dwarf::DW_CFA_expression);
748 DefCfaExpr.append(Buffer, Buffer + encodeULEB128(DwarfReg, Buffer));
749 DefCfaExpr.append(Buffer, Buffer + encodeULEB128(Expr.size(), Buffer));
750 DefCfaExpr.append(Expr.str());
751
752 return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
753 Comment.str());
754}
755
756// Allocate stack space and probe it if necessary.
757void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB,
758 MachineBasicBlock::iterator MBBI,
759 MachineFunction &MF, uint64_t Offset,
760 uint64_t RealStackSize, bool EmitCFI,
761 bool NeedProbe, uint64_t ProbeSize,
762 bool DynAllocation,
763 MachineInstr::MIFlag Flag) const {
764 DebugLoc DL;
765 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
766 const RISCVInstrInfo *TII = STI.getInstrInfo();
767 bool IsRV64 = STI.is64Bit();
768 CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
769
770 // Simply allocate the stack if it's not big enough to require a probe.
771 if (!NeedProbe || Offset <= ProbeSize) {
772 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Offset),
773 Flag, getStackAlign());
774
775 if (EmitCFI)
776 CFIBuilder.buildDefCFAOffset(RealStackSize);
777
778 if (NeedProbe && DynAllocation) {
779 // s[d|w] zero, 0(sp)
780 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
781 .addReg(RISCV::X0)
782 .addReg(SPReg)
783 .addImm(0)
784 .setMIFlags(Flag);
785 }
786
787 return;
788 }
789
790 // Unroll the probe loop depending on the number of iterations.
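// For example, with a 4096-byte probe size, a 10000-byte allocation becomes
// two probed 4096-byte steps plus a 1808-byte residual (probed below only if
// a dynamic allocation follows).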
791 if (Offset < ProbeSize * 5) {
792 uint64_t CurrentOffset = 0;
793 while (CurrentOffset + ProbeSize <= Offset) {
794 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
795 StackOffset::getFixed(-ProbeSize), Flag, getStackAlign());
796 // s[d|w] zero, 0(sp)
797 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
798 .addReg(RISCV::X0)
799 .addReg(SPReg)
800 .addImm(0)
801 .setMIFlags(Flag);
802
803 CurrentOffset += ProbeSize;
804 if (EmitCFI)
805 CFIBuilder.buildDefCFAOffset(CurrentOffset);
806 }
807
808 uint64_t Residual = Offset - CurrentOffset;
809 if (Residual) {
810 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
811 StackOffset::getFixed(-Residual), Flag, getStackAlign());
812 if (EmitCFI)
813 CFIBuilder.buildDefCFAOffset(Offset);
814
815 if (DynAllocation) {
816 // s[d|w] zero, 0(sp)
817 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
818 .addReg(RISCV::X0)
819 .addReg(SPReg)
820 .addImm(0)
821 .setMIFlags(Flag);
822 }
823 }
824
825 return;
826 }
827
828 // Emit a variable-length allocation probing loop.
829 uint64_t RoundedSize = alignDown(Offset, ProbeSize);
830 uint64_t Residual = Offset - RoundedSize;
831
832 Register TargetReg = RISCV::X6;
833 // SUB TargetReg, SP, RoundedSize
834 RI->adjustReg(MBB, MBBI, DL, TargetReg, SPReg,
835 StackOffset::getFixed(-RoundedSize), Flag, getStackAlign());
836
837 if (EmitCFI) {
838 // Set the CFA register to TargetReg.
839 CFIBuilder.buildDefCFA(TargetReg, RoundedSize);
840 }
841
842 // It will be expanded to a probe loop in `inlineStackProbe`.
843 BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC)).addReg(TargetReg);
844
845 if (EmitCFI) {
846 // Set the CFA register back to SP.
847 CFIBuilder.buildDefCFARegister(SPReg);
848 }
849
850 if (Residual) {
851 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Residual),
852 Flag, getStackAlign());
853 if (DynAllocation) {
854 // s[d|w] zero, 0(sp)
855 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
856 .addReg(RISCV::X0)
857 .addReg(SPReg)
858 .addImm(0)
859 .setMIFlags(Flag);
860 }
861 }
862
863 if (EmitCFI)
864 CFIBuilder.buildDefCFAOffset(Offset);
865}
866
867static bool isPush(unsigned Opcode) {
868 switch (Opcode) {
869 case RISCV::CM_PUSH:
870 case RISCV::QC_CM_PUSH:
871 case RISCV::QC_CM_PUSHFP:
872 return true;
873 default:
874 return false;
875 }
876}
877
878static bool isPop(unsigned Opcode) {
879 // There are other pops but these are the only ones introduced during this
880 // pass.
881 switch (Opcode) {
882 case RISCV::CM_POP:
883 case RISCV::QC_CM_POP:
884 return true;
885 default:
886 return false;
887 }
888}
889
890static unsigned getPushOpcode(RISCVMachineFunctionInfo::PushPopKind Kind,
891 bool UpdateFP) {
892 switch (Kind) {
893 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
894 return RISCV::CM_PUSH;
895 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
896 return UpdateFP ? RISCV::QC_CM_PUSHFP : RISCV::QC_CM_PUSH;
897 default:
898 llvm_unreachable("Unhandled PushPopKind");
899 }
900}
901
902static unsigned getPopOpcode(RISCVMachineFunctionInfo::PushPopKind Kind) {
903 // There are other pops but they are introduced later by the Push/Pop
904 // Optimizer.
905 switch (Kind) {
906 case RISCVMachineFunctionInfo::PushPopKind::StdExtZcmp:
907 return RISCV::CM_POP;
908 case RISCVMachineFunctionInfo::PushPopKind::VendorXqccmp:
909 return RISCV::QC_CM_POP;
910 default:
911 llvm_unreachable("Unhandled PushPopKind");
912 }
913}
914
915void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
916 MachineBasicBlock &MBB) const {
917 MachineFrameInfo &MFI = MF.getFrameInfo();
918 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
919 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
921
923
924 // Debug location must be unknown since the first debug location is used
925 // to determine the end of the prologue.
926 DebugLoc DL;
927
928 // All calls are tail calls in GHC calling conv, and functions have no
929 // prologue/epilogue.
930 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
931 return;
932
933 // SiFive CLIC needs to swap `sp` into `sf.mscratchcsw`
934 emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);
935
936 // Emit prologue for shadow call stack.
937 emitSCSPrologue(MF, MBB, MBBI, DL);
938
939 // We keep track of the first instruction because it might be a
940 // `(QC.)CM.PUSH(FP)`, and we may need to adjust the immediate rather than
941 // inserting an `addi sp, sp, -N*16`
942 auto PossiblePush = MBBI;
943
944 // Skip past all callee-saved register spill instructions.
945 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
946 ++MBBI;
947
948 // Determine the correct frame layout
949 determineFrameLayout(MF);
950
951 const auto &CSI = MFI.getCalleeSavedInfo();
952
953 // Skip to before the spills of scalar callee-saved registers
954 // FIXME: assumes exactly one instruction is used to save each
955 // callee-saved register.
956 MBBI = std::prev(MBBI, getRVVCalleeSavedInfo(MF, CSI).size() +
957 getUnmanagedCSI(MF, CSI).size());
958 CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
959 bool NeedsDwarfCFI = needsDwarfCFI(MF);
960
961 // If libcalls are used to spill and restore callee-saved registers, the frame
962 // has two sections; the opaque section managed by the libcalls, and the
963 // section managed by MachineFrameInfo which can also hold callee saved
964 // registers in fixed stack slots, both of which have negative frame indices.
965 // This gets even more complicated when incoming arguments are passed via the
966 // stack, as these too have negative frame indices. An example is detailed
967 // below:
968 //
969 // | incoming arg | <- FI[-3]
970 // | libcallspill |
971 // | calleespill | <- FI[-2]
972 // | calleespill | <- FI[-1]
973 // | this_frame | <- FI[0]
974 //
975 // For negative frame indices, the offset from the frame pointer will differ
976 // depending on which of these groups the frame index applies to.
977 // The following calculates the correct offset knowing the number of callee
978 // saved registers spilt by the two methods.
979 if (int LibCallRegs = getLibCallID(MF, MFI.getCalleeSavedInfo()) + 1) {
980 // Calculate the size of the frame managed by the libcall. The stack
981 // alignment of these libcalls should be the same as how we set it in
982 // getABIStackAlignment.
983 unsigned LibCallFrameSize =
984 alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
985 RVFI->setLibCallStackSize(LibCallFrameSize);
986
987 if (NeedsDwarfCFI) {
988 CFIBuilder.buildDefCFAOffset(LibCallFrameSize);
989 for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
990 CFIBuilder.buildOffset(CS.getReg(),
991 MFI.getObjectOffset(CS.getFrameIdx()));
992 }
993 }
994
995 // FIXME (note copied from Lanai): This appears to be overallocating. Needs
996 // investigation. Get the number of bytes to allocate from the FrameInfo.
997 uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
998 uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
999 uint64_t RVVStackSize = RVFI->getRVVStackSize();
1000
1001 // Early exit if there is no need to allocate on the stack
1002 if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
1003 return;
1004
1005 // If the stack pointer has been marked as reserved, then produce an error if
1006 // the frame requires stack allocation
1007 if (STI.isRegisterReservedByUser(SPReg))
1009 MF.getFunction(), "Stack pointer required, but has been reserved."});
1010
1011 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
1012 // Split the SP adjustment to reduce the offsets of callee saved spill.
1013 if (FirstSPAdjustAmount) {
1014 StackSize = FirstSPAdjustAmount;
1015 RealStackSize = FirstSPAdjustAmount;
1016 }
1017
1018 if (RVFI->useQCIInterrupt(MF)) {
1019 // The function starts with `QC.C.MIENTER(.NEST)`, so the `(QC.)CM.PUSH(FP)`
1020 // could only be the next instruction.
1021 ++PossiblePush;
1022
1023 if (NeedsDwarfCFI) {
1024 // Insert the CFI metadata before where we think the `(QC.)CM.PUSH(FP)`
1025 // could be. The PUSH will also get its own CFI metadata for its own
1026 // modifications, which should come after the PUSH.
1027 CFIInstBuilder PushCFIBuilder(MBB, PossiblePush,
1028 MachineInstr::FrameSetup);
1029 PushCFIBuilder.buildDefCFAOffset(QCIInterruptPushAmount);
1030 for (const CalleeSavedInfo &CS : getQCISavedInfo(MF, CSI))
1031 PushCFIBuilder.buildOffset(CS.getReg(),
1032 MFI.getObjectOffset(CS.getFrameIdx()));
1033 }
1034 }
1035
1036 if (RVFI->isPushable(MF) && PossiblePush != MBB.end() &&
1037 isPush(PossiblePush->getOpcode())) {
1038 // Use available stack adjustment in push instruction to allocate additional
1039 // stack space. Align the stack size down to a multiple of 16. This is
1040 // needed for RVE.
1041 // FIXME: Can we increase the stack size to a multiple of 16 instead?
1042 uint64_t StackAdj =
1043 std::min(alignDown(StackSize, 16), static_cast<uint64_t>(48));
1044 PossiblePush->getOperand(1).setImm(StackAdj);
1045 StackSize -= StackAdj;
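// For example, a 120-byte StackSize gives StackAdj = min(alignDown(120, 16),
// 48) = 48, so the push allocates 48 bytes and the remaining 72 bytes are
// allocated by allocateStack below.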
1046
1047 if (NeedsDwarfCFI) {
1048 CFIBuilder.buildDefCFAOffset(RealStackSize - StackSize);
1049 for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
1050 CFIBuilder.buildOffset(CS.getReg(),
1051 MFI.getObjectOffset(CS.getFrameIdx()));
1052 }
1053 }
1054
1055 // Allocate space on the stack if necessary.
1056 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
1057 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
1058 bool NeedProbe = TLI->hasInlineStackProbe(MF);
1059 uint64_t ProbeSize = TLI->getStackProbeSize(MF, getStackAlign());
1060 bool DynAllocation =
1061 MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
1062 if (StackSize != 0)
1063 allocateStack(MBB, MBBI, MF, StackSize, RealStackSize, NeedsDwarfCFI,
1064 NeedProbe, ProbeSize, DynAllocation,
1065 MachineInstr::FrameSetup);
1066
1067 // Save SiFive CLIC CSRs onto the stack
1068 emitSiFiveCLICPreemptibleSaves(MF, MBB, MBBI, DL);
1069
1070 // The frame pointer is callee-saved, and code has been generated for us to
1071 // save it to the stack. We need to skip over the storing of callee-saved
1072 // registers as the frame pointer must be modified after it has been saved
1073 // to the stack, not before.
1074 // FIXME: assumes exactly one instruction is used to save each callee-saved
1075 // register.
1076 std::advance(MBBI, getUnmanagedCSI(MF, CSI).size());
1077 CFIBuilder.setInsertPoint(MBBI);
1078
1079 // Iterate over list of callee-saved registers and emit .cfi_offset
1080 // directives.
1081 if (NeedsDwarfCFI)
1082 for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI))
1083 CFIBuilder.buildOffset(CS.getReg(),
1084 MFI.getObjectOffset(CS.getFrameIdx()));
1085
1086 // Generate new FP.
1087 if (hasFP(MF)) {
1088 if (STI.isRegisterReservedByUser(FPReg))
1090 MF.getFunction(), "Frame pointer required, but has been reserved."});
1091 // The frame pointer does need to be reserved from register allocation.
1092 assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
1093
1094 // Some stack management variants automatically keep FP updated, so we don't
1095 // need an instruction to do so.
1096 if (!RVFI->hasImplicitFPUpdates(MF)) {
1097 RI->adjustReg(
1098 MBB, MBBI, DL, FPReg, SPReg,
1099 StackOffset::getFixed(RealStackSize - RVFI->getVarArgsSaveSize()),
1100 MachineInstr::FrameSetup, getStackAlign());
1101 }
1102
1103 if (NeedsDwarfCFI)
1104 CFIBuilder.buildDefCFA(FPReg, RVFI->getVarArgsSaveSize());
1105 }
1106
1107 uint64_t SecondSPAdjustAmount = 0;
1108 // Emit the second SP adjustment after saving callee saved registers.
1109 if (FirstSPAdjustAmount) {
1110 SecondSPAdjustAmount = getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
1111 assert(SecondSPAdjustAmount > 0 &&
1112 "SecondSPAdjustAmount should be greater than zero");
1113
1114 allocateStack(MBB, MBBI, MF, SecondSPAdjustAmount,
1115 getStackSizeWithRVVPadding(MF), NeedsDwarfCFI && !hasFP(MF),
1116 NeedProbe, ProbeSize, DynAllocation,
1117 MachineInstr::FrameSetup);
1118 }
1119
1120 if (RVVStackSize) {
1121 if (NeedProbe) {
1122 allocateAndProbeStackForRVV(MF, MBB, MBBI, DL, RVVStackSize,
1123 MachineInstr::FrameSetup,
1124 NeedsDwarfCFI && !hasFP(MF), DynAllocation);
1125 } else {
1126 // We must keep the stack pointer aligned through any intermediate
1127 // updates.
1128 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
1129 StackOffset::getScalable(-RVVStackSize),
1130 MachineInstr::FrameSetup, getStackAlign());
1131 }
1132
1133 if (NeedsDwarfCFI && !hasFP(MF)) {
1134 // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
1135 CFIBuilder.insertCFIInst(createDefCFAExpression(
1136 *RI, SPReg, getStackSizeWithRVVPadding(MF), RVVStackSize / 8));
1137 }
1138
1139 std::advance(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
1140 if (NeedsDwarfCFI)
1141 emitCalleeSavedRVVPrologCFI(MBB, MBBI, hasFP(MF));
1142 }
1143
1144 if (hasFP(MF)) {
1145 // Realign Stack
1146 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1147 if (RI->hasStackRealignment(MF)) {
1148 Align MaxAlignment = MFI.getMaxAlign();
1149
1150 const RISCVInstrInfo *TII = STI.getInstrInfo();
1151 if (isInt<12>(-(int)MaxAlignment.value())) {
1152 BuildMI(MBB, MBBI, DL, TII->get(RISCV::ANDI), SPReg)
1153 .addReg(SPReg)
1154 .addImm(-(int)MaxAlignment.value())
1155 .setMIFlag(MachineInstr::FrameSetup);
1156 } else {
1157 unsigned ShiftAmount = Log2(MaxAlignment);
1158 Register VR =
1159 MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
1160 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SRLI), VR)
1161 .addReg(SPReg)
1162 .addImm(ShiftAmount)
1163 .setMIFlag(MachineInstr::FrameSetup);
1164 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SLLI), SPReg)
1165 .addReg(VR)
1166 .addImm(ShiftAmount)
1167 .setMIFlag(MachineInstr::FrameSetup);
1168 }
1169 if (NeedProbe && RVVStackSize == 0) {
1170 // Do a probe if the align + size allocated just passed the probe size
1171 // and was not yet probed.
1172 if (SecondSPAdjustAmount < ProbeSize &&
1173 SecondSPAdjustAmount + MaxAlignment.value() >= ProbeSize) {
1174 bool IsRV64 = STI.is64Bit();
1175 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
1176 .addReg(RISCV::X0)
1177 .addReg(SPReg)
1178 .addImm(0)
1179 .setMIFlag(MachineInstr::FrameSetup);
1180 }
1181 }
1182 // FP will be used to restore the frame in the epilogue, so we need
1183 // another base register BP to record SP after re-alignment. SP will
1184 // track the current stack after allocating variable sized objects.
1185 if (hasBP(MF)) {
1186 // move BP, SP
1187 BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), BPReg)
1188 .addReg(SPReg)
1189 .addImm(0)
1190 .setMIFlag(MachineInstr::FrameSetup);
1191 }
1192 }
1193 }
1194}
1195
1196void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
1197 MachineBasicBlock &MBB,
1198 MachineBasicBlock::iterator MBBI,
1199 const DebugLoc &DL,
1200 uint64_t &StackSize,
1201 int64_t CFAOffset) const {
1202 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1203
1204 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
1205 MachineInstr::FrameDestroy, getStackAlign());
1206 StackSize = 0;
1207
1208 if (needsDwarfCFI(MF))
1209 CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
1210 .buildDefCFAOffset(CFAOffset);
1211}
1212
1213void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
1214 MachineBasicBlock &MBB) const {
1215 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1216 MachineFrameInfo &MFI = MF.getFrameInfo();
1217 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1218
1219 // All calls are tail calls in GHC calling conv, and functions have no
1220 // prologue/epilogue.
1221 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
1222 return;
1223
1224 // Get the insert location for the epilogue. If there were no terminators in
1225 // the block, get the last instruction.
1226 MachineBasicBlock::iterator MBBI = MBB.end();
1227 DebugLoc DL;
1228 if (!MBB.empty()) {
1229 MBBI = MBB.getLastNonDebugInstr();
1230 if (MBBI != MBB.end())
1231 DL = MBBI->getDebugLoc();
1232
1233 MBBI = MBB.getFirstTerminator();
1234
1235 // Skip to before the restores of all callee-saved registers.
1236 while (MBBI != MBB.begin() &&
1237 std::prev(MBBI)->getFlag(MachineInstr::FrameDestroy))
1238 --MBBI;
1239 }
1240
1241 const auto &CSI = MFI.getCalleeSavedInfo();
1242
1243 // Skip to before the restores of scalar callee-saved registers
1244 // FIXME: assumes exactly one instruction is used to restore each
1245 // callee-saved register.
1246 auto FirstScalarCSRRestoreInsn =
1247 std::next(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
1248 CFIInstBuilder CFIBuilder(MBB, FirstScalarCSRRestoreInsn,
1249 MachineInstr::FrameDestroy);
1250 bool NeedsDwarfCFI = needsDwarfCFI(MF);
1251
1252 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
1253 uint64_t RealStackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
1254 : getStackSizeWithRVVPadding(MF);
1255 uint64_t StackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
1256 : getStackSizeWithRVVPadding(MF) -
1257 RVFI->getReservedSpillsSize();
1258 uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
1259 uint64_t RVVStackSize = RVFI->getRVVStackSize();
1260
1261 bool RestoreSPFromFP = RI->hasStackRealignment(MF) ||
1262 MFI.hasVarSizedObjects() || !hasReservedCallFrame(MF);
1263 if (RVVStackSize) {
1264 // If RestoreSPFromFP the stack pointer will be restored using the frame
1265 // pointer value.
1266 if (!RestoreSPFromFP)
1267 RI->adjustReg(MBB, FirstScalarCSRRestoreInsn, DL, SPReg, SPReg,
1268 StackOffset::getScalable(RVVStackSize),
1269 MachineInstr::FrameDestroy, getStackAlign());
1270
1271 if (NeedsDwarfCFI) {
1272 if (!hasFP(MF))
1273 CFIBuilder.buildDefCFA(SPReg, RealStackSize);
1274 emitCalleeSavedRVVEpilogCFI(MBB, FirstScalarCSRRestoreInsn);
1275 }
1276 }
1277
1278 if (FirstSPAdjustAmount) {
1279 uint64_t SecondSPAdjustAmount =
1280 getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
1281 assert(SecondSPAdjustAmount > 0 &&
1282 "SecondSPAdjustAmount should be greater than zero");
1283
1284 // If RestoreSPFromFP the stack pointer will be restored using the frame
1285 // pointer value.
1286 if (!RestoreSPFromFP)
1287 RI->adjustReg(MBB, FirstScalarCSRRestoreInsn, DL, SPReg, SPReg,
1288 StackOffset::getFixed(SecondSPAdjustAmount),
1289 MachineInstr::FrameDestroy, getStackAlign());
1290
1291 if (NeedsDwarfCFI && !hasFP(MF))
1292 CFIBuilder.buildDefCFAOffset(FirstSPAdjustAmount);
1293 }
1294
1295 // Restore the stack pointer using the value of the frame pointer. Only
1296 // necessary if the stack pointer was modified, meaning the stack size is
1297 // unknown.
1298 //
1299 // In order to make sure the stack pointer is right through the EH region,
1300 // we also need to restore the stack pointer from the frame pointer if we
1301 // don't preserve stack space within prologue/epilogue for outgoing variables.
1302 // Normally, checking whether a variable sized object is present
1303 // is enough, but we also don't preserve that space at prologue/epilogue when
1304 // we have vector objects on the stack.
1305 if (RestoreSPFromFP) {
1306 assert(hasFP(MF) && "frame pointer should not have been eliminated");
1307 RI->adjustReg(MBB, FirstScalarCSRRestoreInsn, DL, SPReg, FPReg,
1308 StackOffset::getFixed(-FPOffset), MachineInstr::FrameDestroy,
1309 getStackAlign());
1310 }
1311
1312 if (NeedsDwarfCFI && hasFP(MF))
1313 CFIBuilder.buildDefCFA(SPReg, RealStackSize);
1314
1315 // Skip to after the restores of scalar callee-saved registers
1316 // FIXME: assumes exactly one instruction is used to restore each
1317 // callee-saved register.
1318 MBBI = std::next(FirstScalarCSRRestoreInsn, getUnmanagedCSI(MF, CSI).size());
1319 CFIBuilder.setInsertPoint(MBBI);
1320
1321 if (getLibCallID(MF, CSI) != -1) {
1322 // tail __riscv_restore_[0-12] instruction is considered as a terminator,
1323 // therefore it is unnecessary to place any CFI instructions after it. Just
1324 // deallocate stack if needed and return.
1325 if (StackSize != 0)
1326 deallocateStack(MF, MBB, MBBI, DL, StackSize,
1327 RVFI->getLibCallStackSize());
1328
1329 // Emit epilogue for shadow call stack.
1330 emitSCSEpilogue(MF, MBB, MBBI, DL);
1331 return;
1332 }
1333
1334 // Recover callee-saved registers.
1335 if (NeedsDwarfCFI)
1336 for (const CalleeSavedInfo &CS : getUnmanagedCSI(MF, CSI))
1337 CFIBuilder.buildRestore(CS.getReg());
1338
1339 if (RVFI->isPushable(MF) && MBBI != MBB.end() && isPop(MBBI->getOpcode())) {
1340 // Use available stack adjustment in pop instruction to deallocate stack
1341 // space. Align the stack size down to a multiple of 16. This is needed for
1342 // RVE.
1343 // FIXME: Can we increase the stack size to a multiple of 16 instead?
1344 uint64_t StackAdj =
1345 std::min(alignDown(StackSize, 16), static_cast<uint64_t>(48));
1346 MBBI->getOperand(1).setImm(StackAdj);
1347 StackSize -= StackAdj;
1348
1349 if (StackSize != 0)
1350 deallocateStack(MF, MBB, MBBI, DL, StackSize,
1351 /*stack_adj of cm.pop instr*/ RealStackSize - StackSize);
1352
1353 auto NextI = next_nodbg(MBBI, MBB.end());
1354 if (NextI == MBB.end() || NextI->getOpcode() != RISCV::PseudoRET) {
1355 ++MBBI;
1356 if (NeedsDwarfCFI) {
1357 CFIBuilder.setInsertPoint(MBBI);
1358
1359 for (const CalleeSavedInfo &CS : getPushOrLibCallsSavedInfo(MF, CSI))
1360 CFIBuilder.buildRestore(CS.getReg());
1361
1362 // Update CFA Offset. If this is a QCI interrupt function, there will
1363 // be a leftover offset which is deallocated by `QC.C.MILEAVERET`,
1364 // otherwise getQCIInterruptStackSize() will be 0.
1365 CFIBuilder.buildDefCFAOffset(RVFI->getQCIInterruptStackSize());
1366 }
1367 }
1368 }
1369
1370 emitSiFiveCLICPreemptibleRestores(MF, MBB, MBBI, DL);
1371
1372 // Deallocate stack if StackSize isn't a zero yet. If this is a QCI interrupt
1373 // function, there will be a leftover offset which is deallocated by
1374 // `QC.C.MILEAVERET`, otherwise getQCIInterruptStackSize() will be 0.
1375 if (StackSize != 0)
1376 deallocateStack(MF, MBB, MBBI, DL, StackSize,
1377 RVFI->getQCIInterruptStackSize());
1378
1379 // Emit epilogue for shadow call stack.
1380 emitSCSEpilogue(MF, MBB, MBBI, DL);
1381
1382 // SiFive CLIC needs to swap `sf.mscratchcsw` into `sp`
1383 emitSiFiveCLICStackSwap(MF, MBB, MBBI, DL);
1384}
1385
1386StackOffset
1387RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1388 Register &FrameReg) const {
1389 const MachineFrameInfo &MFI = MF.getFrameInfo();
1390 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
1391 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1392
1393 // Callee-saved registers should be referenced relative to the stack
1394 // pointer (positive offset), otherwise use the frame pointer (negative
1395 // offset).
1396 const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo());
1397 int MinCSFI = 0;
1398 int MaxCSFI = -1;
1399 StackOffset Offset;
1400 auto StackID = MFI.getStackID(FI);
1401
1402 assert((StackID == TargetStackID::Default ||
1403 StackID == TargetStackID::ScalableVector) &&
1404 "Unexpected stack ID for the frame object.");
1405 if (StackID == TargetStackID::Default) {
1406 assert(getOffsetOfLocalArea() == 0 && "LocalAreaOffset is not 0!");
1407 Offset = StackOffset::getFixed(MFI.getObjectOffset(FI) +
1408 MFI.getOffsetAdjustment());
1409 } else if (StackID == TargetStackID::ScalableVector) {
1410 Offset = StackOffset::getScalable(MFI.getObjectOffset(FI));
1411 }
1412
1413 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
1414
1415 if (CSI.size()) {
1416 MinCSFI = CSI[0].getFrameIdx();
1417 MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
1418 }
1419
1420 if (FI >= MinCSFI && FI <= MaxCSFI) {
1421 FrameReg = SPReg;
1422
1423 if (FirstSPAdjustAmount)
1424 Offset += StackOffset::getFixed(FirstSPAdjustAmount);
1425 else
1426 Offset += StackOffset::getFixed(getStackSizeWithRVVPadding(MF));
1427 return Offset;
1428 }
1429
1430 if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(FI)) {
1431 // If the stack was realigned, the frame pointer is set in order to allow
1432 // SP to be restored, so we need another base register to record the stack
1433 // after realignment.
1434 // |--------------------------| -- <-- FP
1435 // | callee-allocated save | | <----|
1436 // | area for register varargs| | |
1437 // |--------------------------| | |
1438 // | callee-saved registers | | |
1439 // |--------------------------| -- |
1440 // | realignment (the size of | | |
1441 // | this area is not counted | | |
1442 // | in MFI.getStackSize()) | | |
1443 // |--------------------------| -- |-- MFI.getStackSize()
1444 // | RVV alignment padding | | |
1445 // | (not counted in | | |
1446 // | MFI.getStackSize() but | | |
1447 // | counted in | | |
1448 // | RVFI.getRVVStackSize()) | | |
1449 // |--------------------------| -- |
1450 // | RVV objects | | |
1451 // | (not counted in | | |
1452 // | MFI.getStackSize()) | | |
1453 // |--------------------------| -- |
1454 // | padding before RVV | | |
1455 // | (not counted in | | |
1456 // | MFI.getStackSize() or in | | |
1457 // | RVFI.getRVVStackSize()) | | |
1458 // |--------------------------| -- |
1459 // | scalar local variables | | <----'
1460 // |--------------------------| -- <-- BP (if var sized objects present)
1461 // | VarSize objects | |
1462 // |--------------------------| -- <-- SP
1463 if (hasBP(MF)) {
1464 FrameReg = RISCVABI::getBPReg();
1465 } else {
1466 // VarSize objects must be empty in this case!
1467 assert(!MFI.hasVarSizedObjects());
1468 FrameReg = SPReg;
1469 }
1470 } else {
1471 FrameReg = RI->getFrameRegister(MF);
1472 }
1473
1474 if (FrameReg == FPReg) {
1475 Offset += StackOffset::getFixed(RVFI->getVarArgsSaveSize());
1476 // When using FP to access scalable vector objects, we need to subtract
1477 // the frame size.
1478 //
1479 // |--------------------------| -- <-- FP
1480 // | callee-allocated save | |
1481 // | area for register varargs| |
1482 // |--------------------------| |
1483 // | callee-saved registers | |
1484 // |--------------------------| | MFI.getStackSize()
1485 // | scalar local variables | |
1486 // |--------------------------| -- (Offset of RVV objects is from here.)
1487 // | RVV objects |
1488 // |--------------------------|
1489 // | VarSize objects |
1490 // |--------------------------| <-- SP
1491 if (StackID == TargetStackID::ScalableVector) {
1492 assert(!RI->hasStackRealignment(MF) &&
1493 "Can't index across variable sized realign");
1494 // We don't expect any extra RVV alignment padding, as the stack size
1495 // and RVV object sections should be correctly aligned in their own
1496 // right.
1498 "Inconsistent stack layout");
1500 }
1501 return Offset;
1502 }
1503
1504 // This case handles indexing off both SP and BP.
1505 // If indexing off SP, there must not be any var sized objects
1506 assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());
1507
1508 // When using SP to access frame objects, we need to add RVV stack size.
1509 //
1510 // |--------------------------| -- <-- FP
1511 // | callee-allocated save | | <----|
1512 // | area for register varargs| | |
1513 // |--------------------------| | |
1514 // | callee-saved registers | | |
1515 // |--------------------------| -- |
1516 // | RVV alignment padding | | |
1517 // | (not counted in | | |
1518 // | MFI.getStackSize() but | | |
1519 // | counted in | | |
1520 // | RVFI.getRVVStackSize()) | | |
1521 // |--------------------------| -- |
1522 // | RVV objects | | |-- MFI.getStackSize()
1523 // | (not counted in | | |
1524 // | MFI.getStackSize()) | | |
1525 // |--------------------------| -- |
1526 // | padding before RVV | | |
1527 // | (not counted in | | |
1528 // | MFI.getStackSize()) | | |
1529 // |--------------------------| -- |
1530 // | scalar local variables | | <----'
1531 // |--------------------------| -- <-- BP (if var sized objects present)
1532 // | VarSize objects | |
1533 // |--------------------------| -- <-- SP
1534 //
1535 // The total amount of padding surrounding RVV objects is described by
1536 // RVV->getRVVPadding() and it can be zero. It allows us to align the RVV
1537 // objects to the required alignment.
1538 if (MFI.getStackID(FI) == TargetStackID::Default) {
1539 if (MFI.isFixedObjectIndex(FI)) {
1540 assert(!RI->hasStackRealignment(MF) &&
1541 "Can't index across variable sized realign");
1543 RVFI->getRVVStackSize());
1544 } else {
1545 Offset += StackOffset::getScalable(RVFI->getRVVStackSize());
1546 }
1547 } else if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
1548 // Ensure the base of the RVV stack is correctly aligned: add on the
1549 // alignment padding.
1550 int ScalarLocalVarSize = MFI.getStackSize() -
1551 RVFI->getCalleeSavedStackSize() -
1552 RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
1553 Offset += StackOffset::get(ScalarLocalVarSize, RVFI->getRVVStackSize());
1554 }
1555 return Offset;
1556}
1557
1558static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI,
1559 const Register &Reg) {
1560 MCRegister BaseReg = TRI.getSubReg(Reg, RISCV::sub_vrm1_0);
1561 // If it's not a grouped vector register, it doesn't have subregisters, so
1562 // the base register is just itself.
1563 if (BaseReg == RISCV::NoRegister)
1564 BaseReg = Reg;
1565 return BaseReg;
1566}
1567
1568void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
1569 BitVector &SavedRegs,
1570 RegScavenger *RS) const {
1571 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1572
1573 // In TargetFrameLowering::determineCalleeSaves, any vector register is marked
1574 // as saved if any of its subregisters is clobbered; this is not correct for
1575 // vector registers. We only want the vector register to be marked as saved
1576 // if all of its subregisters are clobbered.
1577 // For example:
1578 // Original behavior: If v24 is marked, v24m2, v24m4, v24m8 are also marked.
1579 // Correct behavior: v24m2 is marked only if v24 and v25 are marked.
1580 const MachineRegisterInfo &MRI = MF.getRegInfo();
1581 const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
1582 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
1583 for (unsigned i = 0; CSRegs[i]; ++i) {
1584 unsigned CSReg = CSRegs[i];
1585 // Only vector registers need special care.
1586 if (!RISCV::VRRegClass.contains(getRVVBaseRegister(TRI, CSReg)))
1587 continue;
1588
1589 SavedRegs.reset(CSReg);
1590
1591 auto SubRegs = TRI.subregs(CSReg);
1592 // Set the register and all its subregisters.
1593 if (!MRI.def_empty(CSReg) || MRI.getUsedPhysRegsMask().test(CSReg)) {
1594 SavedRegs.set(CSReg);
1595 for (unsigned Reg : SubRegs)
1596 SavedRegs.set(Reg);
1597 }
1598
1599 // Combine to super register if all of its subregisters are marked.
1600 if (!SubRegs.empty() && llvm::all_of(SubRegs, [&](unsigned Reg) {
1601 return SavedRegs.test(Reg);
1602 }))
1603 SavedRegs.set(CSReg);
1604 }
1605
1606 // Unconditionally spill RA and FP only if the function uses a frame
1607 // pointer.
1608 if (hasFP(MF)) {
1609 SavedRegs.set(RAReg);
1610 SavedRegs.set(FPReg);
1611 }
1612 // Mark BP as used if function has dedicated base pointer.
1613 if (hasBP(MF))
1614 SavedRegs.set(RISCVABI::getBPReg());
1615
1616 // When using cm.push/pop we must save X27 if we save X26.
1617 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1618 if (RVFI->isPushable(MF) && SavedRegs.test(RISCV::X26))
1619 SavedRegs.set(RISCV::X27);
1620
1621 // SiFive Preemptible Interrupt Handlers need additional frame entries
1622 createSiFivePreemptibleInterruptFrameEntries(MF, *RVFI);
1623}
1624
1625std::pair<int64_t, Align>
1626RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
1627 MachineFrameInfo &MFI = MF.getFrameInfo();
1628 // Create a buffer of RVV objects to allocate.
1629 SmallVector<int, 8> ObjectsToAllocate;
1630 auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
1631 for (int I = FIBegin, E = FIEnd; I != E; ++I) {
1632 unsigned StackID = MFI.getStackID(I);
1633 if (StackID != TargetStackID::ScalableVector)
1634 continue;
1635 if (MFI.isDeadObjectIndex(I))
1636 continue;
1637
1638 ObjectsToAllocate.push_back(I);
1639 }
1640 };
1641 // First push RVV Callee Saved object, then push RVV stack object
1642 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
1643 const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
1644 if (!RVVCSI.empty())
1645 pushRVVObjects(RVVCSI[0].getFrameIdx(),
1646 RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
1647 pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());
1648
1649 // The minimum alignment is 16 bytes.
1650 Align RVVStackAlign(16);
1651 const auto &ST = MF.getSubtarget<RISCVSubtarget>();
1652
1653 if (!ST.hasVInstructions()) {
1654 assert(ObjectsToAllocate.empty() &&
1655 "Can't allocate scalable-vector objects without V instructions");
1656 return std::make_pair(0, RVVStackAlign);
1657 }
1658
1659 // Allocate all RVV locals and spills
1660 int64_t Offset = 0;
1661 for (int FI : ObjectsToAllocate) {
1662 // ObjectSize in bytes.
1663 int64_t ObjectSize = MFI.getObjectSize(FI);
1664 auto ObjectAlign =
1665 std::max(Align(RISCV::RVVBytesPerBlock), MFI.getObjectAlign(FI));
1666 // If the data type is the fractional vector type, reserve one vector
1667 // register for it.
1668 if (ObjectSize < RISCV::RVVBytesPerBlock)
1669 ObjectSize = RISCV::RVVBytesPerBlock;
1670 Offset = alignTo(Offset + ObjectSize, ObjectAlign);
1671 MFI.setObjectOffset(FI, -Offset);
1672 // Update the maximum alignment of the RVV stack section
1673 RVVStackAlign = std::max(RVVStackAlign, ObjectAlign);
1674 }
1675
1676 uint64_t StackSize = Offset;
1677
1678 // Ensure the alignment of the RVV stack. Since we want the most-aligned
1679 // object right at the bottom (i.e., any padding at the top of the frame),
1680 // readjust all RVV objects down by the alignment padding.
1681 // Stack size and offsets are multiples of vscale, stack alignment is in
1682 // bytes, we can divide stack alignment by minimum vscale to get a maximum
1683 // stack alignment multiple of vscale.
1684 auto VScale =
1685 std::max<uint64_t>(ST.getRealMinVLen() / RISCV::RVVBitsPerBlock, 1);
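// For example, with a guaranteed minimum VLEN of 128 bits, VScale is 2, so a
// 16-byte RVVStackAlign corresponds to padding the scalable stack size to a
// multiple of 8 (vscale units), i.e. 8 * vscale bytes.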
1686 if (auto RVVStackAlignVScale = RVVStackAlign.value() / VScale) {
1687 if (auto AlignmentPadding =
1688 offsetToAlignment(StackSize, Align(RVVStackAlignVScale))) {
1689 StackSize += AlignmentPadding;
1690 for (int FI : ObjectsToAllocate)
1691 MFI.setObjectOffset(FI, MFI.getObjectOffset(FI) - AlignmentPadding);
1692 }
1693 }
1694
1695 return std::make_pair(StackSize, RVVStackAlign);
1696}
1697
1698static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
1699 // For RVV spill, scalable stack offsets computing requires up to two scratch
1700 // registers
1701 static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
1702
1703 // For RVV spill, non-scalable stack offsets computing requires up to one
1704 // scratch register.
1705 static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
1706
1707 // An ADDI instruction's destination register can be used for computing
1708 // offsets, so scalable stack offsets require up to one scratch register.
1709 static constexpr unsigned ScavSlotsADDIScalableObject = 1;
1710
1711 static constexpr unsigned MaxScavSlotsNumKnown =
1712 std::max({ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
1713 ScavSlotsNumRVVSpillNonScalableObject});
1714
1715 unsigned MaxScavSlotsNum = 0;
1716 if (!MF.getSubtarget<RISCVSubtarget>().hasVInstructions())
1717 return false;
1718 for (const MachineBasicBlock &MBB : MF)
1719 for (const MachineInstr &MI : MBB) {
1720 bool IsRVVSpill = RISCV::isRVVSpill(MI);
1721 for (auto &MO : MI.operands()) {
1722 if (!MO.isFI())
1723 continue;
1724 bool IsScalableVectorID = MF.getFrameInfo().getStackID(MO.getIndex()) ==
1725 TargetStackID::ScalableVector;
1726 if (IsRVVSpill) {
1727 MaxScavSlotsNum = std::max(
1728 MaxScavSlotsNum, IsScalableVectorID
1729 ? ScavSlotsNumRVVSpillScalableObject
1730 : ScavSlotsNumRVVSpillNonScalableObject);
1731 } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
1732 MaxScavSlotsNum =
1733 std::max(MaxScavSlotsNum, ScavSlotsADDIScalableObject);
1734 }
1735 }
1736 if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
1737 return MaxScavSlotsNumKnown;
1738 }
1739 return MaxScavSlotsNum;
1740}
1741
1742static bool hasRVVFrameObject(const MachineFunction &MF) {
1743 // Originally, this function scanned all the stack objects to check whether
1744 // there was any scalable vector object on the stack. However, that caused
1745 // errors in the register allocator. In issue 53016, it returned false
1746 // before RA because there were no RVV stack objects, but it returned true
1747 // after RA because spill slots for RVV values were created during RA. Due
1748 // to this inconsistent behavior, BP was not reserved during register
1749 // allocation, yet BP accesses were generated in the PEI pass.
1750 //
1751 // The function was changed to use hasVInstructions() as the return value.
1752 // It is not precise, but it keeps the register allocation correct.
1753 //
1754 // FIXME: Find a better way to make the decision or revisit the solution in
1755 // D103622.
1756 //
1757 // Refer to https://github.com/llvm/llvm-project/issues/53016.
1758 return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
1759}
1760
1761static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
1762 const RISCVInstrInfo &TII) {
1763 unsigned FnSize = 0;
1764 for (auto &MBB : MF) {
1765 for (auto &MI : MBB) {
1766 // Far branches over a 20-bit offset will be relaxed by the branch
1767 // relaxation pass. In the worst case, a conditional branch is relaxed into
1768 // the following instruction sequence. Unconditional branches are relaxed
1769 // in the same way, except that there is no leading conditional branch
1770 // instruction.
1771 //
1772 // foo
1773 // bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
1774 // sd s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1775 // jump .restore, s11 # 8 bytes
1776 // .rev_cond
1777 // bar
1778 // j .dest_bb # 4 bytes, or 2 bytes with Zca
1779 // .restore:
1780 // ld s11, 0(sp) # 4 bytes, or 2 bytes with Zca
1781 // .dest:
1782 // baz
1783 if (MI.isConditionalBranch())
1784 FnSize += TII.getInstSizeInBytes(MI);
1785 if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
1786 if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
1787 FnSize += 2 + 8 + 2 + 2;
1788 else
1789 FnSize += 4 + 8 + 4 + 4;
1790 continue;
1791 }
1792
1793 FnSize += TII.getInstSizeInBytes(MI);
1794 }
1795 }
1796 return FnSize;
1797}
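For illustration only, the worst-case accounting used above can be written as a tiny helper. This is a hypothetical sketch, not LLVM code; BranchInstSize stands in for TII.getInstSizeInBytes(MI) and is assumed to be 4 bytes for an uncompressed conditional branch.

#include <cstdint>

// Worst-case bytes charged for one conditional branch after relaxation:
// the branch itself, the spill of s11, the long jump, the short jump in the
// fall-through path, and the reload of s11 (sizes follow the comment above).
static uint64_t worstCaseCondBranchBytes(bool HasZca,
                                         uint64_t BranchInstSize = 4) {
  return BranchInstSize + (HasZca ? 2 + 8 + 2 + 2 : 4 + 8 + 4 + 4);
}

int main() {
  // With Zca: 4 + 14 = 18 bytes; without: 4 + 20 = 24 bytes.
  return (worstCaseCondBranchBytes(true) == 18 &&
          worstCaseCondBranchBytes(false) == 24)
             ? 0
             : 1;
}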
1798
1799void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
1800 MachineFunction &MF, RegScavenger *RS) const {
1801 const RISCVRegisterInfo *RegInfo =
1802 MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
1803 const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1804 MachineFrameInfo &MFI = MF.getFrameInfo();
1805 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1806 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1807
1808 int64_t RVVStackSize;
1809 Align RVVStackAlign;
1810 std::tie(RVVStackSize, RVVStackAlign) = assignRVVStackObjectOffsets(MF);
1811
1812 RVFI->setRVVStackSize(RVVStackSize);
1813 RVFI->setRVVStackAlign(RVVStackAlign);
1814
1815 if (hasRVVFrameObject(MF)) {
1816 // Ensure the entire stack is aligned to at least the RVV requirement: some
1817 // scalable-vector object alignments are not considered by the
1818 // target-independent code.
1819 MFI.ensureMaxAlignment(RVVStackAlign);
1820 }
1821
1822 unsigned ScavSlotsNum = 0;
1823
1824 // estimateStackSize has been observed to under-estimate the final stack
1825 // size, so give ourselves wiggle-room by checking for a stack size
1826 // representable in an 11-bit signed field rather than 12 bits.
1827 if (!isInt<11>(MFI.estimateStackSize(MF)))
1828 ScavSlotsNum = 1;
1829
1830 // Far branches over a 20-bit offset require a spill slot for a scratch register.
1831 bool IsLargeFunction = !isInt<20>(estimateFunctionSizeInBytes(MF, *TII));
1832 if (IsLargeFunction)
1833 ScavSlotsNum = std::max(ScavSlotsNum, 1u);
1834
1835 // RVV loads & stores have no capacity to hold the immediate address offsets
1836 // so we must always reserve an emergency spill slot if the MachineFunction
1837 // contains any RVV spills.
1838 ScavSlotsNum = std::max(ScavSlotsNum, getScavSlotsNumForRVV(MF));
1839
1840 for (unsigned I = 0; I < ScavSlotsNum; I++) {
1841 int FI = MFI.CreateSpillStackObject(RegInfo->getSpillSize(*RC),
1842 RegInfo->getSpillAlign(*RC));
1843 RS->addScavengingFrameIndex(FI);
1844
1845 if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
1846 RVFI->setBranchRelaxationScratchFrameIndex(FI);
1847 }
1848
1849 unsigned Size = RVFI->getReservedSpillsSize();
1850 for (const auto &Info : MFI.getCalleeSavedInfo()) {
1851 int FrameIdx = Info.getFrameIdx();
1852 if (FrameIdx < 0 || MFI.getStackID(FrameIdx) != TargetStackID::Default)
1853 continue;
1854
1855 Size += MFI.getObjectSize(FrameIdx);
1856 }
1857 RVFI->setCalleeSavedStackSize(Size);
1858}
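The emergency spill-slot count chosen above can be summarised by a small model. The sketch below is hypothetical and simplified; EstimatedStackSize, FnSizeBytes, and RVVSlots stand in for MFI.estimateStackSize(MF), estimateFunctionSizeInBytes(MF, *TII), and getScavSlotsNumForRVV(MF).

#include <algorithm>
#include <cstdint>

// One slot if the estimated stack size might not fit an 11-bit signed field
// (wiggle room versus the real 12-bit limit), one slot if the function may
// exceed the 20-bit branch range, and whatever the RVV spills require.
static unsigned scavSlots(int64_t EstimatedStackSize, uint64_t FnSizeBytes,
                          unsigned RVVSlots) {
  unsigned Num = 0;
  if (EstimatedStackSize < -(1 << 10) || EstimatedStackSize >= (1 << 10))
    Num = 1;                          // !isInt<11>(estimated stack size)
  if (FnSizeBytes >= (1ULL << 19))    // !isInt<20>(estimated function size)
    Num = std::max(Num, 1u);
  return std::max(Num, RVVSlots);
}

int main() {
  // A 4 KiB frame in a small function with RVV spills needs two slots.
  return scavSlots(4096, 10000, 2) == 2 ? 0 : 1;
}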
1859
1860// Do not reserve stack space within the prologue for outgoing variables when
1861// the function contains variable-sized objects or there are vector objects
1862// accessed by the frame pointer.
1863// Let eliminateCallFramePseudoInstr preserve stack space for them instead.
1864bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
1865 return !MF.getFrameInfo().hasVarSizedObjects() &&
1866 !(hasFP(MF) && hasRVVFrameObject(MF));
1867}
1868
1869// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
1870MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
1871 MachineFunction &MF, MachineBasicBlock &MBB,
1872 MachineBasicBlock::iterator MI) const {
1873 DebugLoc DL = MI->getDebugLoc();
1874
1875 if (!hasReservedCallFrame(MF)) {
1876 // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
1877 // ADJCALLSTACKUP must be converted to instructions manipulating the stack
1878 // pointer. This is necessary when there is a variable length stack
1879 // allocation (e.g. alloca), which means it's not possible to allocate
1880 // space for outgoing arguments from within the function prologue.
1881 int64_t Amount = MI->getOperand(0).getImm();
1882
1883 if (Amount != 0) {
1884 // Ensure the stack remains aligned after adjustment.
1885 Amount = alignSPAdjust(Amount);
1886
1887 if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
1888 Amount = -Amount;
1889
1890 const RISCVTargetLowering *TLI =
1891 MF.getSubtarget<RISCVSubtarget>().getTargetLowering();
1892 int64_t ProbeSize = TLI->getStackProbeSize(MF, getStackAlign());
1893 if (TLI->hasInlineStackProbe(MF) && -Amount >= ProbeSize) {
1894 // When stack probing is enabled, the decrement of SP may need to be
1895 // probed. We can handle both the decrement and the probing in
1896 // allocateStack.
1897 bool DynAllocation =
1898 MF.getInfo<RISCVMachineFunctionInfo>()->hasDynamicAllocation();
1899 allocateStack(MBB, MI, MF, -Amount, -Amount,
1900 needsDwarfCFI(MF) && !hasFP(MF),
1901 /*NeedProbe=*/true, ProbeSize, DynAllocation,
1902 MachineInstr::FrameSetup);
1903 } else {
1904 const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
1905 RI.adjustReg(MBB, MI, DL, SPReg, SPReg, StackOffset::getFixed(Amount),
1906 MachineInstr::NoFlags, getStackAlign());
1907 }
1908 }
1909 }
1910
1911 return MBB.erase(MI);
1912}
1913
1914// We would like to split the SP adjustment to reduce the prologue/epilogue
1915// size, as in the following instructions. In this way, the offset of each
1916// callee-saved register can fit in a single store. Suppose the first SP
1917// adjustment amount is 2032.
1918// add sp,sp,-2032
1919// sw ra,2028(sp)
1920// sw s0,2024(sp)
1921// sw s1,2020(sp)
1922// sw s3,2012(sp)
1923// sw s4,2008(sp)
1924// add sp,sp,-64
1925uint64_t
1926RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
1927 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1928 const MachineFrameInfo &MFI = MF.getFrameInfo();
1929 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
1930 uint64_t StackSize = getStackSizeWithRVVPadding(MF);
1931
1932 // Disable SplitSPAdjust if save-restore libcalls, push/pop, or QCI
1933 // interrupts are used: the callee-saved registers are already pushed by
1934 // those mechanisms, so we don't have to split the SP adjustment in this case.
1935 if (RVFI->getReservedSpillsSize())
1936 return 0;
1937
1938 // Return the FirstSPAdjustAmount if the StackSize cannot fit in a signed
1939 // 12-bit immediate and there exists a callee-saved register needing to be pushed.
1940 if (!isInt<12>(StackSize) && (CSI.size() > 0)) {
1941 // FirstSPAdjustAmount is chosen at most as (2048 - StackAlign) because
1942 // 2048 will cause sp = sp + 2048 in the epilogue to be split into multiple
1943 // instructions. Offsets smaller than 2048 can fit in a single load/store
1944 // instruction, and we have to stick with the stack alignment. 2048 has
1945 // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
1946 // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
1947 const uint64_t StackAlign = getStackAlign().value();
1948
1949 // An amount of (2048 - StackAlign) will prevent the callee-save and
1950 // restore instructions from being compressed, so try to adjust the amount
1951 // to the largest offset that the compressed stack-relative load/store
1952 // instructions accept when the target supports the compression extensions.
1953 if (STI.hasStdExtZca()) {
1954 // The compression extensions may support the following instructions:
1955 // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
1956 // c.swsp rs2, offset[7:2] => 2^(6 + 2)
1957 // c.flwsp rd, offset[7:2] => 2^(6 + 2)
1958 // c.fswsp rs2, offset[7:2] => 2^(6 + 2)
1959 // riscv64: c.ldsp rd, offset[8:3] => 2^(6 + 3)
1960 // c.sdsp rs2, offset[8:3] => 2^(6 + 3)
1961 // c.fldsp rd, offset[8:3] => 2^(6 + 3)
1962 // c.fsdsp rs2, offset[8:3] => 2^(6 + 3)
1963 const uint64_t RVCompressLen = STI.getXLen() * 8;
1964 // Compared with the amount (2048 - StackAlign), StackSize needs to
1965 // satisfy the following conditions so that no extra instructions are
1966 // needed to adjust sp after the first adjustment, e.g. when StackSize
1967 // meets the condition (StackSize <= 2047 + RVCompressLen):
1968 // case1: Amount is 2048 - StackAlign: use addi + addi to adjust sp.
1969 // case2: Amount is RVCompressLen: use addi + addi to adjust sp.
1970 auto CanCompress = [&](uint64_t CompressLen) -> bool {
1971 if (StackSize <= 2047 + CompressLen ||
1972 (StackSize > 2048 * 2 - StackAlign &&
1973 StackSize <= 2047 * 2 + CompressLen) ||
1974 StackSize > 2048 * 3 - StackAlign)
1975 return true;
1976
1977 return false;
1978 };
1979 // In the epilogue, addi sp, sp, 496 is used to recover the sp and it
1980 // can be compressed (C.ADDI16SP, whose offset range is [-512, 496]), but
1981 // addi sp, sp, 512 cannot be compressed. So try to use 496 first.
1982 const uint64_t ADDI16SPCompressLen = 496;
1983 if (STI.is64Bit() && CanCompress(ADDI16SPCompressLen))
1984 return ADDI16SPCompressLen;
1985 if (CanCompress(RVCompressLen))
1986 return RVCompressLen;
1987 }
1988 return 2048 - StackAlign;
1989 }
1990 return 0;
1991}
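To make the thresholds above concrete, here is a hypothetical, simplified model of the decision for an RV64 target with Zca, assuming a non-empty CSI and no reserved spills (the guards the real function checks first). It is a sketch, not the LLVM implementation.

#include <cstdint>

// Pick the first SP adjustment so the callee-save stores stay within reach of
// compressed stack-relative stores where possible (RV64 + Zca assumed).
static uint64_t firstSPAdjust(uint64_t StackSize) {
  const uint64_t StackAlign = 16;
  const uint64_t RVCompressLen = 64 * 8;    // 512: c.sdsp reaches offsets < 512
  const uint64_t ADDI16SPCompressLen = 496; // largest compressible c.addi16sp
  auto CanCompress = [&](uint64_t Len) {
    return StackSize <= 2047 + Len ||
           (StackSize > 2048 * 2 - StackAlign && StackSize <= 2047 * 2 + Len) ||
           StackSize > 2048 * 3 - StackAlign;
  };
  if (StackSize <= 2047) // fits a 12-bit immediate: no split needed
    return 0;
  if (CanCompress(ADDI16SPCompressLen))
    return ADDI16SPCompressLen;
  if (CanCompress(RVCompressLen))
    return RVCompressLen;
  return 2048 - StackAlign;
}

int main() {
  // A 2100-byte frame is split as 496 + 1604, keeping CSR stores compressible.
  return firstSPAdjust(2100) == 496 ? 0 : 1;
}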
1992
1993bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
1994 MachineFunction &MF, const TargetRegisterInfo *TRI,
1995 std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
1996 unsigned &MaxCSFrameIndex) const {
1997 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1998
1999 // Preemptible Interrupts have two additional Callee-save Frame Indexes,
2000 // not tracked by `CSI`.
2001 if (RVFI->isSiFivePreemptibleInterrupt(MF)) {
2002 for (int I = 0; I < 2; ++I) {
2003 int FI = RVFI->getInterruptCSRFrameIndex(I);
2004 MinCSFrameIndex = std::min<unsigned>(MinCSFrameIndex, FI);
2005 MaxCSFrameIndex = std::max<unsigned>(MaxCSFrameIndex, FI);
2006 }
2007 }
2008
2009 // Early exit if no callee saved registers are modified!
2010 if (CSI.empty())
2011 return true;
2012
2013 if (RVFI->useQCIInterrupt(MF)) {
2014 RVFI->setQCIInterruptStackSize(QCIInterruptPushAmount);
2015 }
2016
2017 if (RVFI->isPushable(MF)) {
2018 // Determine how many GPRs we need to push and save it to RVFI.
2019 unsigned PushedRegNum = getNumPushPopRegs(CSI);
2020
2021 // `QC.C.MIENTER(.NEST)` will save `ra` and `s0`, so we should only push if
2022 // we want to push more than 2 registers. Otherwise, we should push if we
2023 // want to push more than 0 registers.
2024 unsigned OnlyPushIfMoreThan = RVFI->useQCIInterrupt(MF) ? 2 : 0;
2025 if (PushedRegNum > OnlyPushIfMoreThan) {
2026 RVFI->setRVPushRegs(PushedRegNum);
2027 RVFI->setRVPushStackSize(alignTo((STI.getXLen() / 8) * PushedRegNum, 16));
2028 }
2029 }
2030
2031 MachineFrameInfo &MFI = MF.getFrameInfo();
2032 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
2033
2034 for (auto &CS : CSI) {
2035 MCRegister Reg = CS.getReg();
2036 const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
2037 unsigned Size = RegInfo->getSpillSize(*RC);
2038
2039 if (RVFI->useQCIInterrupt(MF)) {
2040 const auto *FFI = llvm::find_if(FixedCSRFIQCIInterruptMap, [&](auto P) {
2041 return P.first == CS.getReg();
2042 });
2043 if (FFI != std::end(FixedCSRFIQCIInterruptMap)) {
2044 int64_t Offset = FFI->second * (int64_t)Size;
2045
2046 int FrameIdx = MFI.CreateFixedSpillStackObject(Size, Offset);
2047 assert(FrameIdx < 0);
2048 CS.setFrameIdx(FrameIdx);
2049 continue;
2050 }
2051 }
2052
2053 if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
2054 const auto *FII = llvm::find_if(
2055 FixedCSRFIMap, [&](MCPhysReg P) { return P == CS.getReg(); });
2056 unsigned RegNum = std::distance(std::begin(FixedCSRFIMap), FII);
2057
2058 if (FII != std::end(FixedCSRFIMap)) {
2059 int64_t Offset;
2060 if (RVFI->getPushPopKind(MF) ==
2062 Offset = -int64_t(RVFI->getRVPushRegs() - RegNum) * Size;
2063 else
2064 Offset = -int64_t(RegNum + 1) * Size;
2065
2066 if (RVFI->useQCIInterrupt(MF))
2067 Offset -= QCIInterruptPushAmount;
2068
2069 int FrameIdx = MFI.CreateFixedSpillStackObject(Size, Offset);
2070 assert(FrameIdx < 0);
2071 CS.setFrameIdx(FrameIdx);
2072 continue;
2073 }
2074 }
2075
2076 // Not a fixed slot.
2077 Align Alignment = RegInfo->getSpillAlign(*RC);
2078 // We may not be able to satisfy the desired alignment specification of
2079 // the TargetRegisterClass if the stack alignment is smaller. Use the
2080 // min.
2081 Alignment = std::min(Alignment, getStackAlign());
2082 int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
2083 if ((unsigned)FrameIdx < MinCSFrameIndex)
2084 MinCSFrameIndex = FrameIdx;
2085 if ((unsigned)FrameIdx > MaxCSFrameIndex)
2086 MaxCSFrameIndex = FrameIdx;
2087 CS.setFrameIdx(FrameIdx);
2088 if (RISCVRegisterInfo::isRVVRegClass(RC))
2089 MFI.setStackID(FrameIdx, TargetStackID::ScalableVector);
2090 }
2091
2092 if (RVFI->useQCIInterrupt(MF)) {
2093 // Allocate a fixed object that covers the entire QCI stack allocation,
2094 // because there are gaps which are reserved for future use.
2095 MFI.CreateFixedSpillStackObject(
2096 QCIInterruptPushAmount, -static_cast<int64_t>(QCIInterruptPushAmount));
2097 }
2098
2099 if (RVFI->isPushable(MF)) {
2100 int64_t QCIOffset = RVFI->useQCIInterrupt(MF) ? QCIInterruptPushAmount : 0;
2101 // Allocate a fixed object that covers the full push.
2102 if (int64_t PushSize = RVFI->getRVPushStackSize())
2103 MFI.CreateFixedSpillStackObject(PushSize, -PushSize - QCIOffset);
2104 } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
2105 int64_t LibCallFrameSize =
2106 alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
2107 MFI.CreateFixedSpillStackObject(LibCallFrameSize, -LibCallFrameSize);
2108 }
2109
2110 return true;
2111}
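As a small worked example of the fixed-slot offsets assigned above (not LLVM code): each entry's slot index is scaled by the GPR spill size, so on RV64 the fp entry of FixedCSRFIQCIInterruptMap, at index -2, ends up 16 bytes below the incoming stack pointer.

#include <cstdint>

// Offset = MapIndex * (XLEN / 8), mirroring "FFI->second * (int64_t)Size" above.
static int64_t qciFixedOffset(int8_t MapIndex, unsigned XLen) {
  return int64_t(MapIndex) * int64_t(XLen / 8);
}

int main() {
  // fp (index -2) sits at -16 on RV64; ra (index -4) sits at -16 on RV32.
  return (qciFixedOffset(-2, 64) == -16 && qciFixedOffset(-4, 32) == -16) ? 0 : 1;
}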
2112
2113bool RISCVFrameLowering::spillCalleeSavedRegisters(
2114 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2115 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2116 if (CSI.empty())
2117 return true;
2118
2119 MachineFunction *MF = MBB.getParent();
2120 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
2121 DebugLoc DL;
2122 if (MI != MBB.end() && !MI->isDebugInstr())
2123 DL = MI->getDebugLoc();
2124
2125 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2126 if (RVFI->useQCIInterrupt(*MF)) {
2127 // Emit QC.C.MIENTER(.NEST)
2128 BuildMI(
2129 MBB, MI, DL,
2130 TII.get(RVFI->getInterruptStackKind(*MF) ==
2132 ? RISCV::QC_C_MIENTER_NEST
2133 : RISCV::QC_C_MIENTER))
2134 .setMIFlag(MachineInstr::FrameSetup);
2135
2136 for (auto [Reg, _Offset] : FixedCSRFIQCIInterruptMap)
2137 MBB.addLiveIn(Reg);
2138 }
2139
2140 if (RVFI->isPushable(*MF)) {
2141 // Emit CM.PUSH with base StackAdj & evaluate Push stack
2142 unsigned PushedRegNum = RVFI->getRVPushRegs();
2143 if (PushedRegNum > 0) {
2144 // Use encoded number to represent registers to spill.
2145 unsigned Opcode = getPushOpcode(
2146 RVFI->getPushPopKind(*MF), hasFP(*MF) && !RVFI->useQCIInterrupt(*MF));
2147 unsigned RegEnc = RISCVZC::encodeRegListNumRegs(PushedRegNum);
2148 MachineInstrBuilder PushBuilder =
2149 BuildMI(MBB, MI, DL, TII.get(Opcode))
2150 .setMIFlag(MachineInstr::FrameSetup);
2151 PushBuilder.addImm(RegEnc);
2152 PushBuilder.addImm(0);
2153
2154 for (unsigned i = 0; i < PushedRegNum; i++)
2155 PushBuilder.addUse(FixedCSRFIMap[i], RegState::Implicit);
2156 }
2157 } else if (const char *SpillLibCall = getSpillLibCallName(*MF, CSI)) {
2158 // Add spill libcall via non-callee-saved register t0.
2159 BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5)
2160 .addExternalSymbol(SpillLibCall, RISCVII::MO_CALL)
2161 .setMIFlag(MachineInstr::FrameSetup);
2162
2163 // Add registers spilled in libcall as liveins.
2164 for (auto &CS : CSI)
2165 MBB.addLiveIn(CS.getReg());
2166 }
2167
2168 // Manually spill values not spilled by libcall & Push/Pop.
2169 const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
2170 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
2171
2172 auto storeRegsToStackSlots = [&](decltype(UnmanagedCSI) CSInfo) {
2173 for (auto &CS : CSInfo) {
2174 // Insert the spill to the stack frame.
2175 MCRegister Reg = CS.getReg();
2176 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
2177 TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
2178 CS.getFrameIdx(), RC, TRI, Register(),
2179 MachineInstr::FrameSetup);
2180 }
2181 };
2182 storeRegsToStackSlots(UnmanagedCSI);
2183 storeRegsToStackSlots(RVVCSI);
2184
2185 return true;
2186}
2187
2188static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) {
2189 return RISCV::VRRegClass.contains(BaseReg) ? 1
2190 : RISCV::VRM2RegClass.contains(BaseReg) ? 2
2191 : RISCV::VRM4RegClass.contains(BaseReg) ? 4
2192 : 8;
2193}
2194
2195void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
2196 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const {
2197 MachineFunction *MF = MBB.getParent();
2198 const MachineFrameInfo &MFI = MF->getFrameInfo();
2199 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2200 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2201
2202 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
2203 if (RVVCSI.empty())
2204 return;
2205
2206 uint64_t FixedSize = getStackSizeWithRVVPadding(*MF);
2207 if (!HasFP) {
2208 uint64_t ScalarLocalVarSize =
2209 MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
2210 RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
2211 FixedSize -= ScalarLocalVarSize;
2212 }
2213
2214 CFIInstBuilder CFIBuilder(MBB, MI, MachineInstr::FrameSetup);
2215 for (auto &CS : RVVCSI) {
2216 // Insert the spill to the stack frame.
2217 int FI = CS.getFrameIdx();
2218 MCRegister BaseReg = getRVVBaseRegister(TRI, CS.getReg());
2219 unsigned NumRegs = getCalleeSavedRVVNumRegs(CS.getReg());
2220 for (unsigned i = 0; i < NumRegs; ++i) {
2221 CFIBuilder.insertCFIInst(createDefCFAOffset(
2222 TRI, BaseReg + i, -FixedSize, MFI.getObjectOffset(FI) / 8 + i));
2223 }
2224 }
2225}
2226
2227void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
2228 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
2229 MachineFunction *MF = MBB.getParent();
2230 const MachineFrameInfo &MFI = MF->getFrameInfo();
2231 const RISCVRegisterInfo &TRI = *STI.getRegisterInfo();
2232
2233 CFIInstBuilder CFIHelper(MBB, MI, MachineInstr::FrameDestroy);
2234 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
2235 for (auto &CS : RVVCSI) {
2236 MCRegister BaseReg = getRVVBaseRegister(TRI, CS.getReg());
2237 unsigned NumRegs = getCalleeSavedRVVNumRegs(CS.getReg());
2238 for (unsigned i = 0; i < NumRegs; ++i)
2239 CFIHelper.buildRestore(BaseReg + i);
2240 }
2241}
2242
2243bool RISCVFrameLowering::restoreCalleeSavedRegisters(
2244 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2245 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2246 if (CSI.empty())
2247 return true;
2248
2249 MachineFunction *MF = MBB.getParent();
2250 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
2251 DebugLoc DL;
2252 if (MI != MBB.end() && !MI->isDebugInstr())
2253 DL = MI->getDebugLoc();
2254
2255 // Manually restore values not restored by libcall & Push/Pop.
2256 // Reverse the restore order in the epilogue. In addition, the return
2257 // address is restored first in the epilogue. This increases
2258 // the opportunity to avoid the load-to-use data hazard between
2259 // loading RA and returning via RA. loadRegFromStackSlot can insert
2260 // multiple instructions.
2261 const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
2262 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
2263
2264 auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
2265 for (auto &CS : CSInfo) {
2266 MCRegister Reg = CS.getReg();
2267 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
2268 TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
2269 Register(), MachineInstr::FrameDestroy);
2270 assert(MI != MBB.begin() &&
2271 "loadRegFromStackSlot didn't insert any code!");
2272 }
2273 };
2274 loadRegFromStackSlot(RVVCSI);
2275 loadRegFromStackSlot(UnmanagedCSI);
2276
2277 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2278 if (RVFI->useQCIInterrupt(*MF)) {
2279 // Don't emit anything here because restoration is handled by
2280 // QC.C.MILEAVERET which we already inserted to return.
2281 assert(MI->getOpcode() == RISCV::QC_C_MILEAVERET &&
2282 "Unexpected QCI Interrupt Return Instruction");
2283 }
2284
2285 if (RVFI->isPushable(*MF)) {
2286 unsigned PushedRegNum = RVFI->getRVPushRegs();
2287 if (PushedRegNum > 0) {
2288 unsigned Opcode = getPopOpcode(RVFI->getPushPopKind(*MF));
2289 unsigned RegEnc = RISCVZC::encodeRegListNumRegs(PushedRegNum);
2290 MachineInstrBuilder PopBuilder =
2291 BuildMI(MBB, MI, DL, TII.get(Opcode))
2292 .setMIFlag(MachineInstr::FrameDestroy);
2293 // Use encoded number to represent registers to restore.
2294 PopBuilder.addImm(RegEnc);
2295 PopBuilder.addImm(0);
2296
2297 for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
2298 PopBuilder.addDef(FixedCSRFIMap[i], RegState::ImplicitDefine);
2299 }
2300 } else {
2301 const char *RestoreLibCall = getRestoreLibCallName(*MF, CSI);
2302 if (RestoreLibCall) {
2303 // Add restore libcall via tail call.
2304 MachineInstrBuilder NewMI =
2305 BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoTAIL))
2306 .addExternalSymbol(RestoreLibCall, RISCVII::MO_CALL)
2307 .setMIFlag(MachineInstr::FrameDestroy);
2308
2309 // Remove trailing returns, since the terminator is now a tail call to the
2310 // restore function.
2311 if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
2312 NewMI->copyImplicitOps(*MF, *MI);
2313 MI->eraseFromParent();
2314 }
2315 }
2316 }
2317 return true;
2318}
2319
2320bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
2321 // Keep the conventional code flow when not optimizing.
2322 if (MF.getFunction().hasOptNone())
2323 return false;
2324
2325 return true;
2326}
2327
2328bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
2329 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2330 const MachineFunction *MF = MBB.getParent();
2331 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2332
2333 // Make sure VTYPE and VL are not live-in since we will use vsetvli in the
2334 // prologue to get the VLEN, and that will clobber these registers.
2335 //
2336 // We could also check whether the stack contains objects with scalable
2337 // vector type, but that would require iterating over all the stack objects;
2338 // it may not be worth it since the situation is rare. We can add further
2339 // checks in the future if it turns out to be necessary.
2340 if (STI.preferVsetvliOverReadVLENB() &&
2341 (MBB.isLiveIn(RISCV::VTYPE) || MBB.isLiveIn(RISCV::VL)))
2342 return false;
2343
2344 if (!RVFI->useSaveRestoreLibCalls(*MF))
2345 return true;
2346
2347 // Inserting a call to a __riscv_save libcall requires the use of the register
2348 // t0 (X5) to hold the return address. Therefore if this register is already
2349 // used we can't insert the call.
2350
2351 RegScavenger RS;
2352 RS.enterBasicBlock(*TmpMBB);
2353 return !RS.isRegUsed(RISCV::X5);
2354}
2355
2356bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2357 const MachineFunction *MF = MBB.getParent();
2358 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
2359 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
2360
2361 // We do not want QC.C.MILEAVERET to be subject to shrink-wrapping - it must
2362 // come in the final block of its function as it both pops and returns.
2363 if (RVFI->useQCIInterrupt(*MF))
2364 return MBB.succ_empty();
2365
2366 if (!RVFI->useSaveRestoreLibCalls(*MF))
2367 return true;
2368
2369 // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
2370 // This means if we still need to continue executing code within this function
2371 // the restore cannot take place in this basic block.
2372
2373 if (MBB.succ_size() > 1)
2374 return false;
2375
2376 MachineBasicBlock *SuccMBB =
2377 MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
2378
2379 // Doing a tail call should be safe if there are no successors, because either
2380 // we have a returning block or the end of the block is unreachable, so the
2381 // restore will be eliminated regardless.
2382 if (!SuccMBB)
2383 return true;
2384
2385 // The successor can only contain a return, since we would effectively be
2386 // replacing the successor with our own tail return at the end of our block.
2387 return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
2388}
2389
2390bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
2391 switch (ID) {
2392 case TargetStackID::Default:
2393 case TargetStackID::ScalableVector:
2394 return true;
2395 case TargetStackID::NoAlloc:
2396 case TargetStackID::SGPRSpill:
2397 case TargetStackID::WasmLocal:
2398 return false;
2399 }
2400 llvm_unreachable("Invalid TargetStackID::Value");
2401}
2402
2403TargetStackID::Value RISCVFrameLowering::getStackIDForScalableVectors() const {
2404 return TargetStackID::ScalableVector;
2405}
2406
2407// Synthesize the probe loop.
2408static void emitStackProbeInline(MachineBasicBlock::iterator MBBI, DebugLoc DL,
2409 Register TargetReg, bool IsRVV) {
2410 assert(TargetReg != RISCV::X2 && "New top of stack cannot already be in SP");
2411
2412 MachineBasicBlock &MBB = *MBBI->getParent();
2413 MachineFunction &MF = *MBB.getParent();
2414
2415 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
2416 const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
2417 bool IsRV64 = Subtarget.is64Bit();
2418 Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
2419 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
2420 uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);
2421
2422 MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
2423 MachineBasicBlock *LoopTestMBB =
2424 MF.CreateMachineBasicBlock(MBB.getBasicBlock());
2425 MF.insert(MBBInsertPoint, LoopTestMBB);
2426 MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
2427 MF.insert(MBBInsertPoint, ExitMBB);
2428 MachineInstr::MIFlag Flags = MachineInstr::FrameSetup;
2429 Register ScratchReg = RISCV::X7;
2430
2431 // ScratchReg = ProbeSize
2432 TII->movImm(MBB, MBBI, DL, ScratchReg, ProbeSize, Flags);
2433
2434 // LoopTest:
2435 // SUB SP, SP, ProbeSize
2436 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::SUB), SPReg)
2437 .addReg(SPReg)
2438 .addReg(ScratchReg)
2439 .setMIFlags(Flags);
2440
2441 // s[d|w] zero, 0(sp)
2442 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL,
2443 TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
2444 .addReg(RISCV::X0)
2445 .addReg(SPReg)
2446 .addImm(0)
2447 .setMIFlags(Flags);
2448
2449 if (IsRVV) {
2450 // SUB TargetReg, TargetReg, ProbeSize
2451 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::SUB),
2452 TargetReg)
2453 .addReg(TargetReg)
2454 .addReg(ScratchReg)
2455 .setMIFlags(Flags);
2456
2457 // BGE TargetReg, ProbeSize, LoopTest
2458 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::BGE))
2459 .addReg(TargetReg)
2460 .addReg(ScratchReg)
2461 .addMBB(LoopTestMBB)
2462 .setMIFlags(Flags);
2463
2464 } else {
2465 // BNE SP, TargetReg, LoopTest
2466 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::BNE))
2467 .addReg(SPReg)
2468 .addReg(TargetReg)
2469 .addMBB(LoopTestMBB)
2470 .setMIFlags(Flags);
2471 }
2472
2473 ExitMBB->splice(ExitMBB->end(), &MBB, std::next(MBBI), MBB.end());
2474 ExitMBB->transferSuccessorsAndUpdatePHIs(&MBB);
2475
2476 LoopTestMBB->addSuccessor(ExitMBB);
2477 LoopTestMBB->addSuccessor(LoopTestMBB);
2478 MBB.addSuccessor(LoopTestMBB);
2479 // Update liveins.
2480 fullyRecomputeLiveIns({ExitMBB, LoopTestMBB});
2481}
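The loop emitted above behaves like the following host-side model (a sketch for the non-RVV case, not the emitted machine code): SP is lowered one ProbeSize step at a time and a zero is stored at each new stack top so that every page in the newly allocated region is touched.

#include <cassert>
#include <cstdint>

// Semantic model of the probe loop: addresses are plain integers and Store
// stands in for the "s[d|w] zero, 0(sp)" probe.
static void probedStackAlloc(uint64_t &SP, uint64_t Target, uint64_t ProbeSize,
                             void (*Store)(uint64_t Addr)) {
  assert(Target < SP && (SP - Target) % ProbeSize == 0);
  while (SP != Target) { // BNE SP, TargetReg, LoopTest
    SP -= ProbeSize;     // SUB SP, SP, ScratchReg
    Store(SP);           // probe the new stack top
  }
}

int main() {
  uint64_t SP = 0x10000;
  probedStackAlloc(SP, 0x10000 - 3 * 4096, 4096, +[](uint64_t) {});
  return SP == 0x10000 - 3 * 4096 ? 0 : 1;
}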
2482
2483void RISCVFrameLowering::inlineStackProbe(MachineFunction &MF,
2484 MachineBasicBlock &MBB) const {
2485 // Get the instructions that need to be replaced. We emit at most two of
2486 // these. Remember them in order to avoid complications coming from the need
2487 // to traverse the block while potentially creating more blocks.
2488 SmallVector<MachineInstr *, 4> ToReplace;
2489 for (MachineInstr &MI : MBB) {
2490 unsigned Opc = MI.getOpcode();
2491 if (Opc == RISCV::PROBED_STACKALLOC ||
2492 Opc == RISCV::PROBED_STACKALLOC_RVV) {
2493 ToReplace.push_back(&MI);
2494 }
2495 }
2496
2497 for (MachineInstr *MI : ToReplace) {
2498 if (MI->getOpcode() == RISCV::PROBED_STACKALLOC ||
2499 MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV) {
2500 MachineBasicBlock::iterator MBBI = MI->getIterator();
2501 DebugLoc DL = MBB.findDebugLoc(MBBI);
2502 Register TargetReg = MI->getOperand(0).getReg();
2503 emitStackProbeInline(MBBI, DL, TargetReg,
2504 (MI->getOpcode() == RISCV::PROBED_STACKALLOC_RVV));
2505 MBBI->eraseFromParent();
2506 }
2507 }
2508}