//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPU.h"
#include "AMDGPULaneMaskUtils.h"
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableSpillVGPRToAGPR(
    "amdgpu-spill-vgpr-to-agpr",
    cl::desc("Enable spilling VGPRs to AGPRs"),
    cl::ReallyHidden,
    cl::init(true));
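
// Usage note (illustrative): like any cl::opt, this can be toggled from the
// llc command line, e.g. `llc -amdgpu-spill-vgpr-to-agpr=0`, which keeps VGPR
// spills on the stack instead of rewriting them to AGPRs in
// processFunctionBeforeFrameFinalized below.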

// Find a register matching \p RC from \p LiveUnits which is unused and
// available throughout the function. On failure, returns AMDGPU::NoRegister.
// TODO: Rewrite the loop here to iterate over MCRegUnits instead of
// MCRegisters. This should reduce the number of iterations and avoid redundant
// checking.
static MCRegister findUnusedRegister(const MachineRegisterInfo &MRI,
                                     const LiveRegUnits &LiveUnits,
                                     const TargetRegisterClass &RC) {
  for (MCRegister Reg : RC) {
    if (!MRI.isPhysRegUsed(Reg) && LiveUnits.available(Reg) &&
        !MRI.isReserved(Reg))
      return Reg;
  }
  return MCRegister();
}

// Find a scratch register that we can use in the prologue. We avoid using
// callee-save registers since they may appear to be free when this is called
// from canUseAsPrologue (during shrink wrapping), but then no longer be free
// when this is called from emitPrologue.
static MCRegister findScratchNonCalleeSaveRegister(
    MachineRegisterInfo &MRI, LiveRegUnits &LiveUnits,
    const TargetRegisterClass &RC, bool Unused = false) {
  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveUnits.addReg(CSRegs[i]);

  // We are looking for a register that can be used throughout the entire
  // function, so any use is unacceptable.
  if (Unused)
    return findUnusedRegister(MRI, LiveUnits, RC);

  for (MCRegister Reg : RC) {
    if (LiveUnits.available(Reg) && !MRI.isReserved(Reg))
      return Reg;
  }

  return MCRegister();
}

/// Query target location for spilling SGPRs
/// \p IncludeScratchCopy : Also look for free scratch SGPRs
static void getVGPRSpillLaneOrTempRegister(
    MachineFunction &MF, LiveRegUnits &LiveUnits, Register SGPR,
    const TargetRegisterClass &RC = AMDGPU::SReg_32_XM0_XEXECRegClass,
    bool IncludeScratchCopy = true) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned Size = TRI->getSpillSize(RC);
  Align Alignment = TRI->getSpillAlign(RC);

  // We need to save and restore the given SGPR.

  Register ScratchSGPR;
  // 1: Try to save the given register into an unused scratch SGPR. The
  // LiveUnits should have all the callee saved registers marked as used. For
  // certain cases we skip copy to scratch SGPR.
  if (IncludeScratchCopy)
    ScratchSGPR = findUnusedRegister(MF.getRegInfo(), LiveUnits, RC);

  if (!ScratchSGPR) {
    int FI = FrameInfo.CreateStackObject(Size, Alignment, true, nullptr,
                                         TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() &&
        MFI->allocateSGPRSpillToVGPRLane(MF, FI, /*SpillToPhysVGPRLane=*/true,
                                         /*IsPrologEpilog=*/true)) {
      // 2: There's no free lane to spill, and no free register to save the
      // SGPR, so we're forced to take another VGPR to use for the spill.
      MFI->addToPrologEpilogSGPRSpills(
          SGPR, PrologEpilogSGPRSaveRestoreInfo(
                    SGPRSaveKind::SPILL_TO_VGPR_LANE, FI));

      LLVM_DEBUG(auto Spill = MFI->getSGPRSpillToPhysicalVGPRLanes(FI).front();
                 dbgs() << printReg(SGPR, TRI) << " requires fallback spill to "
                        << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                        << '\n';);
    } else {
      // Remove dead <FI> index
      FrameInfo.RemoveStackObject(FI);
      // 3: If all else fails, spill the register to memory.
      FI = FrameInfo.CreateSpillStackObject(Size, Alignment);
      MFI->addToPrologEpilogSGPRSpills(
          SGPR,
          PrologEpilogSGPRSaveRestoreInfo(SGPRSaveKind::SPILL_TO_MEM, FI));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling "
                        << printReg(SGPR, TRI) << '\n');
    }
  } else {
    MFI->addToPrologEpilogSGPRSpills(
        SGPR, PrologEpilogSGPRSaveRestoreInfo(
                  SGPRSaveKind::COPY_TO_SCRATCH_SGPR, ScratchSGPR));
    LiveUnits.addReg(ScratchSGPR);
    LLVM_DEBUG(dbgs() << "Saving " << printReg(SGPR, TRI) << " with copy to "
                      << printReg(ScratchSGPR, TRI) << '\n');
  }
}
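
// Note: the save-location cascade above is, in order of preference:
//   1) COPY_TO_SCRATCH_SGPR: a free SGPR exists, so a plain copy suffices;
//   2) SPILL_TO_VGPR_LANE: write the SGPR into one lane of a physical VGPR
//      (one 32-bit lane per 32-bit sub-register);
//   3) SPILL_TO_MEM: store to a scratch stack slot as a last resort.
// For example, saving a 32-bit FP with method 2 costs a single
// SI_SPILL_S32_TO_VGPR into, e.g., lane 0 of a reserved VGPR, while method 3
// costs a v_mov plus a scratch/buffer store per dword.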

// We need to specially emit stack operations here because a different frame
// register is used than in the rest of the function, as getFrameRegister would
// use.
static void buildPrologSpill(const GCNSubtarget &ST, const SIRegisterInfo &TRI,
                             const SIMachineFunctionInfo &FuncInfo,
                             LiveRegUnits &LiveUnits, MachineFunction &MF,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I, const DebugLoc &DL,
                             Register SpillReg, int FI, Register FrameReg,
                             int64_t DwordOff = 0) {
  unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
                                        : AMDGPU::BUFFER_STORE_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  LiveUnits.addReg(SpillReg);
  bool IsKill = !MBB.isLiveIn(SpillReg);
  TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, IsKill, FrameReg,
                          DwordOff, MMO, nullptr, &LiveUnits);
  if (IsKill)
    LiveUnits.removeReg(SpillReg);
}

static void buildEpilogRestore(const GCNSubtarget &ST,
                               const SIRegisterInfo &TRI,
                               const SIMachineFunctionInfo &FuncInfo,
                               LiveRegUnits &LiveUnits, MachineFunction &MF,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL, Register SpillReg, int FI,
                               Register FrameReg, int64_t DwordOff = 0) {
  unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
                                        : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, false, FrameReg,
                          DwordOff, MMO, nullptr, &LiveUnits);
}
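
// Illustrative note: both helpers move exactly one dword per call; wider
// registers are handled by the callers, which loop over 32-bit sub-registers
// and bump DwordOff by 4 each iteration. E.g. saving a 64-bit SGPR pair to a
// spill slot at frame index FI emits two stores, at dword offsets 0 and 4.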

static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  if (MFI->getGITPtrHigh() != 0xffffffff) {
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64_pseudo);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
      .addReg(GitPtrLo);
}

static void initLiveUnits(LiveRegUnits &LiveUnits, const SIRegisterInfo &TRI,
                          const SIMachineFunctionInfo *FuncInfo,
                          MachineFunction &MF, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, bool IsProlog) {
  if (LiveUnits.empty()) {
    LiveUnits.init(TRI);
    if (IsProlog) {
      LiveUnits.addLiveIns(MBB);
    } else {
      // In epilog.
      LiveUnits.addLiveOuts(MBB);
      LiveUnits.stepBackward(*MBBI);
    }
  }
}

namespace llvm {

// SpillBuilder to save/restore special SGPR spills like the one needed for FP,
// BP, etc. These spills are delayed until the current function's frame is
// finalized. For a given register, the builder uses the
// PrologEpilogSGPRSaveRestoreInfo to decide the spill method.
class PrologEpilogSGPRSpillBuilder {
  MachineBasicBlock::iterator MI;
  MachineBasicBlock &MBB;
  MachineFunction &MF;
  const GCNSubtarget &ST;
  MachineFrameInfo &MFI;
  SIMachineFunctionInfo *FuncInfo;
  const SIInstrInfo *TII;
  const SIRegisterInfo &TRI;
  Register SuperReg;
  const PrologEpilogSGPRSaveRestoreInfo SI;
  LiveRegUnits &LiveUnits;
  const DebugLoc &DL;
  Register FrameReg;
  ArrayRef<int16_t> SplitParts;
  unsigned NumSubRegs;
  unsigned EltSize = 4;

  void saveToMemory(const int FI) const {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    assert(!MFI.isDeadObjectIndex(FI));

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ true);

    Register TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
          .addReg(SubReg);

      buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL, TmpVGPR,
                       FI, FrameReg, DwordOff);
      DwordOff += 4;
    }
  }

  void saveToVGPRLane(const int FI) const {
    assert(!MFI.isDeadObjectIndex(FI));

    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
        FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
    assert(Spill.size() == NumSubRegs);

    for (unsigned I = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_S32_TO_VGPR),
              Spill[I].VGPR)
          .addReg(SubReg)
          .addImm(Spill[I].Lane)
          .addReg(Spill[I].VGPR, RegState::Undef);
    }
  }

  void copyToScratchSGPR(Register DstReg) const {
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(SuperReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  void restoreFromMemory(const int FI) {
    MachineRegisterInfo &MRI = MF.getRegInfo();

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ false);
    Register TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));

      buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL,
                         TmpVGPR, FI, FrameReg, DwordOff);
      assert(SubReg.isPhysical());

      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
          .addReg(TmpVGPR, RegState::Kill);
      DwordOff += 4;
    }
  }

  void restoreFromVGPRLane(const int FI) {
    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
        FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
    assert(Spill.size() == NumSubRegs);

    for (unsigned I = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_RESTORE_S32_FROM_VGPR), SubReg)
          .addReg(Spill[I].VGPR)
          .addImm(Spill[I].Lane);
    }
  }

  void copyFromScratchSGPR(Register SrcReg) const {
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), SuperReg)
        .addReg(SrcReg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

public:
  PrologEpilogSGPRSpillBuilder(Register Reg,
                               const PrologEpilogSGPRSaveRestoreInfo SI,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, const SIInstrInfo *TII,
                               const SIRegisterInfo &TRI,
                               LiveRegUnits &LiveUnits, Register FrameReg)
      : MI(MI), MBB(MBB), MF(*MBB.getParent()),
        ST(MF.getSubtarget<GCNSubtarget>()), MFI(MF.getFrameInfo()),
        FuncInfo(MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
        SuperReg(Reg), SI(SI), LiveUnits(LiveUnits), DL(DL),
        FrameReg(FrameReg) {
    const TargetRegisterClass *RC = TRI.getPhysRegBaseClass(SuperReg);
    SplitParts = TRI.getRegSplitParts(RC, EltSize);
    NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

    assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
  }

  void save() {
    switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
      return saveToMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
      return saveToVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
      return copyToScratchSGPR(SI.getReg());
    }
  }

  void restore() {
    switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
      return restoreFromMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
      return restoreFromVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
      return copyFromScratchSGPR(SI.getReg());
    }
  }
};

} // namespace llvm
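
// Typical use (as in emitCSRSpillStores/emitCSRSpillRestores below): build
// one PrologEpilogSGPRSpillBuilder per recorded spill and call save() or
// restore(), e.g.
//   PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII,
//                                   TRI, LiveUnits, FrameReg);
//   SB.save();
// The kind recorded in PrologEpilogSGPRSaveRestoreInfo picks the method.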

// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LiveRegUnits LiveUnits;
    LiveUnits.init(*TRI);
    LiveUnits.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      if (LiveUnits.available(Reg) && !MRI.isReserved(Reg) &&
          MRI.isAllocatable(Reg) && !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    auto And = BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
                   .addReg(FlatScrInitHi)
                   .addImm(0xffff);
    And->getOperand(3).setIsDead(); // Mark SCC as dead.
  } else {
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
          .addReg(FlatScrInitLo)
          .addReg(ScratchWaveOffsetReg);
      auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
                          FlatScrInitHi)
                      .addReg(FlatScrInitHi)
                      .addImm(0);
      Addc->getOperand(3).setIsDead(); // Mark SCC as dead.

      using namespace AMDGPU::Hwreg;
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitLo)
          .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_LO, 0, 32)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitHi)
          .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_HI, 0, 32)));
      return;
    }

    // For GFX9.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
    auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
                        AMDGPU::FLAT_SCR_HI)
                    .addReg(FlatScrInitHi)
                    .addImm(0);
    Addc->getOperand(3).setIsDead(); // Mark SCC as dead.

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  auto LShr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32),
                      AMDGPU::FLAT_SCR_HI)
                  .addReg(FlatScrInitLo, RegState::Kill)
                  .addImm(8);
  LShr->getOperand(3).setIsDead(); // Mark SCC as dead.
}
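
// Worked example for the pre-GFX9 path above: FLAT_SCR_HI must hold the
// scratch base in 256-byte units, so a byte offset of 0x1000 (wave offset
// already added in) becomes 0x1000 >> 8 = 0x10. On GFX9+ FLAT_SCRATCH is
// instead a byte-granular 64-bit pointer, hence the plain
// s_add_u32/s_addc_u32 pair with no shift.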

// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

// Shift down registers reserved for the scratch RSRC.
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        (!GITPtrLoReg || !TRI->isSubRegisterEq(Reg, GITPtrLoReg))) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      MRI.reserveReg(Reg, TRI);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
  return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
}
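
// Example of the scale factor: with buffer (MUBUF) scratch the SP/FP values
// are swizzled per-wave byte offsets, so a 16-byte-per-lane frame on a wave64
// target advances SP by 16 * 64 = 1024, while with flat scratch enabled the
// same frame advances SP by just 16.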

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found clobbers the scratch
  // wave offset, which may be in a fixed SGPR or a free SGPR chosen by
  // SITargetLowering::allocateSystemSGPRs, COPY the scratch wave offset to a
  // free SGPR.
  Register ScratchWaveOffsetReg;
  if (PreloadedScratchWaveOffsetReg &&
      TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }

    // FIXME: We can spill incoming arguments and restore at the end of the
    // prolog.
    if (!ScratchWaveOffsetReg)
      report_fatal_error(
          "could not find temporary scratch offset register in prolog");
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg || !PreloadedScratchWaveOffsetReg);

  unsigned Offset = FrameInfo.getStackSize() * getScratchScaleFactor(ST);
  if (!mayReserveScratchForCWSR(MF)) {
    if (hasFP(MF)) {
      Register FPReg = MFI->getFrameOffsetReg();
      assert(FPReg != AMDGPU::FP_REG);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
    }

    if (requiresStackPointerReference(MF)) {
      Register SPReg = MFI->getStackPtrOffsetReg();
      assert(SPReg != AMDGPU::SP_REG);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg).addImm(Offset);
    }
  } else {
    // We need to check if we're on a compute queue - if we are, then the CWSR
    // trap handler may need to store some VGPRs on the stack. The first VGPR
    // block is saved separately, so we only need to allocate space for any
    // additional VGPR blocks used. For now, we will make sure there's enough
    // room for the theoretical maximum number of VGPRs that can be allocated.
    // FIXME: Figure out if the shader uses fewer VGPRs in practice.
    assert(hasFP(MF));
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    unsigned VGPRSize = llvm::alignTo(
        (ST.getAddressableNumVGPRs(MFI->getDynamicVGPRBlockSize()) -
         AMDGPU::IsaInfo::getVGPRAllocGranule(
             &ST, MFI->getDynamicVGPRBlockSize())) *
            4,
        FrameInfo.getMaxAlign());
    MFI->setScratchReservedForDynamicVGPRs(VGPRSize);

    // Read the MicroEngine ID field out of HW_ID2.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), FPReg)
        .addImm(AMDGPU::Hwreg::HwregEncoding::encode(AMDGPU::Hwreg::ID_HW_ID2,
                                                     /*Offset=*/8,
                                                     /*Width=*/2));
    // The MicroEngine ID is 0 for the graphics queue, and 1 or 2 for compute
    // (3 is unused, so we ignore it). Unfortunately, S_GETREG doesn't set
    // SCC, so we need to check for 0 manually.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)).addImm(0).addReg(FPReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMOVK_I32), FPReg).addImm(VGPRSize);
    if (requiresStackPointerReference(MF)) {
      Register SPReg = MFI->getStackPtrOffsetReg();
      assert(SPReg != AMDGPU::SP_REG);

      // If at least one of the constants can be inlined, then we can use
      // s_cselect. Otherwise, use a mov and cmovk.
      if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm()) ||
          AMDGPU::isInlinableLiteral32(Offset + VGPRSize,
                                       ST.hasInv2PiInlineImm())) {
        BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CSELECT_B32), SPReg)
            .addImm(Offset + VGPRSize)
            .addImm(Offset);
      } else {
        BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg).addImm(Offset);
        BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMOVK_I32), SPReg)
            .addImm(Offset + VGPRSize);
      }
    }
  }

  bool NeedsFlatScratchInit =
      MFI->getUserSGPRInfo().hasFlatScratchInit() &&
      (MRI.isPhysRegUsed(AMDGPU::FLAT_SCR) || FrameInfo.hasCalls() ||
       (!allStackObjectsAreDead(FrameInfo) && ST.enableFlatScratch()));

  if ((NeedsFlatScratchInit || ScratchRsrcReg) &&
      PreloadedScratchWaveOffsetReg && !ST.flatScratchIsArchitected()) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (NeedsFlatScratchInit) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }
}

// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register PreloadedScratchRsrcReg,
    Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const Function &Fn = MF.getFunction();

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC
    Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
    Register Rsrc03 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    buildGitPtr(MBB, I, DL, TII, Rsrc01);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        16, Align(4));
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
        .addReg(Rsrc01)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
        .addMemOperand(MMO);

    // The driver will always set the SRD for wave 64 (bits 118:117 of
    // descriptor / bits 22:21 of third sub-reg will be 0b11)
    // If the shader is actually wave32 we have to modify the const_index_stride
    // field of the descriptor 3rd sub-reg (bits 22:21) to 0b10 (stride=32). The
    // reason the driver does this is that there can be cases where it presents
    // 2 shaders with different wave size (e.g. VsFs).
    // TODO: convert to using SCRATCH instructions or multiple SRD buffers
    if (ST.isWave32()) {
      const MCInstrDesc &SBitsetB32 = TII->get(AMDGPU::S_BITSET0_B32);
      BuildMI(MBB, I, DL, SBitsetB32, Rsrc03)
          .addImm(21)
          .addReg(Rsrc03);
    }
  } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
        auto *MMO = MF.getMachineMemOperand(
            PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                MachineMemOperand::MODereferenceable,
            8, Align(4));
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
            .addImm(0) // offset
            .addImm(0) // cpol
            .addMemOperand(MMO)
            .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
          .addExternalSymbol("SCRATCH_RSRC_DWORD0")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
          .addExternalSymbol("SCRATCH_RSRC_DWORD1")
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
        .addImm(Lo_32(Rsrc23))
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
        .addImm(Hi_32(Rsrc23))
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  } else if (ST.isAmdHsaOrMesa(Fn)) {
    assert(PreloadedScratchRsrcReg);

    if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
          .addReg(PreloadedScratchRsrcReg, RegState::Kill);
    }
  }

  // Add the scratch wave offset into the scratch RSRC.
  //
  // We only want to update the first 48 bits, which is the base address
  // pointer, without touching the adjacent 16 bits of flags. We know this add
  // cannot carry-out from bit 47, otherwise the scratch allocation would be
  // impossible to fit in the 48-bit global address space.
  //
  // TODO: Evaluate if it is better to just construct an SRD using the flat
  // scratch init and some constants rather than update the one we are passed.
  Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
  Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

  // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
  // the kernel body via inreg arguments.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
      .addReg(ScratchRsrcSub0)
      .addReg(ScratchWaveOffsetReg)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
                  .addReg(ScratchRsrcSub1)
                  .addImm(0)
                  .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
}
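
// Bit-layout sketch for the add above (illustrative values): words 0-1 of an
// SRD hold a 48-bit base plus 16 bits of flags. With a base whose low dword
// is 0xFFFFF000 and a wave offset of 0x2000, s_add_u32 leaves 0x00001000 in
// sub0 and the carry flows into the base bits [47:32] of sub1 via s_addc_u32,
// while the flag bits [63:48] are untouched because, as noted, the 48-bit
// base can never carry out of bit 47.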

bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  case TargetStackID::ScalableVector:
  case TargetStackID::ScalablePredicateVector:
  case TargetStackID::WasmLocal:
    return false;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

// Activate only the inactive lanes when \p EnableInactiveLanes is true.
// Otherwise, activate all lanes. It returns the saved exec.
static Register buildScratchExecCopy(LiveRegUnits &LiveUnits,
                                     MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const DebugLoc &DL, bool IsProlog,
                                     bool EnableInactiveLanes) {
  Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, IsProlog);

  if (FuncInfo->isWholeWaveFunction()) {
    // Whole wave functions already have a copy of the original EXEC mask that
    // we can use.
    assert(IsProlog && "Epilog should look at return, not setup");
    ScratchExecCopy =
        TII->getWholeWaveFunctionSetup(MF)->getOperand(0).getReg();
    assert(ScratchExecCopy && "Couldn't find copy of EXEC");
  } else {
    ScratchExecCopy = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, *TRI.getWaveMaskRegClass());
  }

  if (!ScratchExecCopy)
    report_fatal_error("failed to find free scratch register");

  LiveUnits.addReg(ScratchExecCopy);

  const unsigned SaveExecOpc =
      ST.isWave32() ? (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B32
                                           : AMDGPU::S_OR_SAVEEXEC_B32)
                    : (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B64
                                           : AMDGPU::S_OR_SAVEEXEC_B64);
  auto SaveExec =
      BuildMI(MBB, MBBI, DL, TII->get(SaveExecOpc), ScratchExecCopy).addImm(-1);
  SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.

  return ScratchExecCopy;
}
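
// Illustrative semantics of the saveexec above (wave64 case):
//   s_xor_saveexec_b64 s[N:N+1], -1   ; saved = exec; exec = exec ^ -1
// flips execution to exactly the previously *inactive* lanes, while
//   s_or_saveexec_b64  s[N:N+1], -1   ; saved = exec; exec = exec | -1
// activates all lanes; either way the original mask lands in the scratch
// SGPR pair for the matching restore in the epilog.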

void SIFrameLowering::emitCSRSpillStores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
    Register FrameReg, Register FramePtrRegScratchCopy) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);

  // Spill Whole-Wave Mode VGPRs. Save only the inactive lanes of the scratch
  // registers. However, save all lanes of callee-saved VGPRs. Due to this, we
  // might end up flipping the EXEC bits twice.
  Register ScratchExecCopy;
  SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
  FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
  if (!WWMScratchRegs.empty())
    ScratchExecCopy =
        buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                             /*IsProlog*/ true, /*EnableInactiveLanes*/ true);

  auto StoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
        for (const auto &Reg : WWMRegs) {
          Register VGPR = Reg.first;
          int FI = Reg.second;
          buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
                           VGPR, FI, FrameReg);
        }
      };

  for (const Register Reg : make_first_range(WWMScratchRegs)) {
    if (!MRI.isReserved(Reg)) {
      MRI.addLiveIn(Reg);
      MBB.addLiveIn(Reg);
    }
  }
  StoreWWMRegisters(WWMScratchRegs);

  auto EnableAllLanes = [&]() {
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
  };

  if (!WWMCalleeSavedRegs.empty()) {
    if (ScratchExecCopy) {
      EnableAllLanes();
    } else {
      ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                                             /*IsProlog*/ true,
                                             /*EnableInactiveLanes*/ false);
    }
  }

  StoreWWMRegisters(WWMCalleeSavedRegs);
  if (FuncInfo->isWholeWaveFunction()) {
    // SI_WHOLE_WAVE_FUNC_SETUP has outlived its purpose, so we can remove
    // it now. If we have already saved some WWM CSR registers, then the EXEC is
    // already -1 and we don't need to do anything else. Otherwise, set EXEC to
    // -1 here.
    if (!ScratchExecCopy)
      buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL, /*IsProlog*/ true,
                           /*EnableInactiveLanes*/ true);
    else if (WWMCalleeSavedRegs.empty())
      EnableAllLanes();
    TII->getWholeWaveFunctionSetup(MF)->eraseFromParent();
  } else if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg)
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveUnits.addReg(ScratchExecCopy);
  }

  Register FramePtrReg = FuncInfo->getFrameOffsetReg();

  for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
    // Special handle FP spill:
    // Skip if FP is saved to a scratch SGPR; the save has already been emitted.
    // Otherwise, FP has been moved to a temporary register and spill it
    // instead.
    Register Reg =
        Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
    if (!Reg)
      continue;

    PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
                                    LiveUnits, FrameReg);
    SB.save();
  }

  // If a copy to scratch SGPR has been chosen for any of the SGPR spills, make
  // such scratch registers live throughout the function.
  SmallVector<Register, 1> ScratchSGPRs;
  FuncInfo->getAllScratchSGPRCopyDstRegs(ScratchSGPRs);
  if (!ScratchSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : ScratchSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveUnits.empty()) {
      for (MCPhysReg Reg : ScratchSGPRs)
        LiveUnits.addReg(Reg);
    }
  }
}

void SIFrameLowering::emitCSRSpillRestores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
    Register FrameReg, Register FramePtrRegScratchCopy) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();

  for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
    // Special handle FP restore:
    // Skip if FP needs to be restored from the scratch SGPR. Otherwise, restore
    // the FP value to a temporary register. The frame pointer should be
    // overwritten only at the end when all other spills are restored from
    // current frame.
    Register Reg =
        Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
    if (!Reg)
      continue;

    PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
                                    LiveUnits, FrameReg);
    SB.restore();
  }

  // Restore Whole-Wave Mode VGPRs. Restore only the inactive lanes of the
  // scratch registers. However, restore all lanes of callee-saved VGPRs. Due to
  // this, we might end up flipping the EXEC bits twice.
  Register ScratchExecCopy;
  SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
  FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
  auto RestoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
        for (const auto &Reg : WWMRegs) {
          Register VGPR = Reg.first;
          int FI = Reg.second;
          buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
                             VGPR, FI, FrameReg);
        }
      };

  if (FuncInfo->isWholeWaveFunction()) {
    // For whole wave functions, the EXEC is already -1 at this point.
    // Therefore, we can restore the CSR WWM registers right away.
    RestoreWWMRegisters(WWMCalleeSavedRegs);

    // The original EXEC is the first operand of the return instruction.
    MachineInstr &Return = MBB.instr_back();
    unsigned Opcode = Return.getOpcode();
    switch (Opcode) {
    case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
      Opcode = AMDGPU::SI_RETURN;
      break;
    case AMDGPU::SI_TCRETURN_GFX_WholeWave:
      Opcode = AMDGPU::SI_TCRETURN_GFX;
      break;
    default:
      llvm_unreachable("Unexpected return inst");
    }
    Register OrigExec = Return.getOperand(0).getReg();

    if (!WWMScratchRegs.empty()) {
      BuildMI(MBB, MBBI, DL, TII->get(LMC.XorOpc), LMC.ExecReg)
          .addReg(OrigExec)
          .addImm(-1);
      RestoreWWMRegisters(WWMScratchRegs);
    }

    // Restore original EXEC.
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addReg(OrigExec);

    // Drop the first operand and update the opcode.
    Return.removeOperand(0);
    Return.setDesc(TII->get(Opcode));

    return;
  }

  if (!WWMScratchRegs.empty()) {
    ScratchExecCopy =
        buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                             /*IsProlog=*/false, /*EnableInactiveLanes=*/true);
  }
  RestoreWWMRegisters(WWMScratchRegs);
  if (!WWMCalleeSavedRegs.empty()) {
    if (ScratchExecCopy) {
      BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
    } else {
      ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                                             /*IsProlog*/ false,
                                             /*EnableInactiveLanes*/ false);
    }
  }

  RestoreWWMRegisters(WWMCalleeSavedRegs);
  if (ScratchExecCopy) {
    // FIXME: Split block and make terminator.
    BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg)
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LiveRegUnits LiveUnits;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  // DebugLoc must be unknown since the first instruction with DebugLoc is used
  // to determine the end of the prologue.
  DebugLoc DL;

  if (FuncInfo->isChainFunction()) {
    // Functions with the amdgpu_cs_chain[_preserve] CC don't receive a SP, but
    // are free to set one up if they need it.
    bool UseSP = requiresStackPointerReference(MF);
    if (UseSP) {
      assert(StackPtrReg != AMDGPU::SP_REG);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B32), StackPtrReg)
          .addImm(MFI.getStackSize() * getScratchScaleFactor(ST));
    }
  }

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;

  if (TRI.hasStackRealignment(MF))
    HasFP = true;

  Register FramePtrRegScratchCopy;
  if (!HasFP && !hasFP(MF)) {
    // Emit the CSR spill stores with SP base register.
    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits,
                       FuncInfo->isChainFunction() ? Register() : StackPtrReg,
                       FramePtrRegScratchCopy);
  } else {
    // CSR spill stores will use FP as base register.
    Register SGPRForFPSaveRestoreCopy =
        FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ true);
    if (SGPRForFPSaveRestoreCopy) {
      // Copy FP to the scratch register now and emit the CFI entry. It avoids
      // the extra FP copy needed in the other two cases when FP is spilled to
      // memory or to a VGPR lane.
      PrologEpilogSGPRSpillBuilder SB(
          FramePtrReg,
          FuncInfo->getPrologEpilogSGPRSaveRestoreInfo(FramePtrReg), MBB, MBBI,
          DL, TII, TRI, LiveUnits, FramePtrReg);
      SB.save();
      LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
    } else {
      // Copy FP into a new scratch register so that its previous value can be
      // spilled after setting up the new frame.
      FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
          MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
      if (!FramePtrRegScratchCopy)
        report_fatal_error("failed to find free scratch register");

      LiveUnits.addReg(FramePtrRegScratchCopy);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrRegScratchCopy)
          .addReg(FramePtrReg);
    }
  }

  if (HasFP) {
    const unsigned Alignment = MFI.getMaxAlign().value();

    RoundedSize += Alignment;
    if (LiveUnits.empty()) {
      LiveUnits.init(TRI);
      LiveUnits.addLiveIns(MBB);
    }

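    // Worked example of the realignment below: with MaxAlign = 16 bytes on a
    // wave64 MUBUF-scratch target the scale factor is 64, so FP becomes
    // (SP + 15 * 64) & -(16 * 64), i.e. SP rounded up to the next 1024-byte
    // (16-byte-per-lane) boundary.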
    // s_add_i32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    auto And = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
                   .addReg(FramePtrReg, RegState::Kill)
                   .addImm(-Alignment * getScratchScaleFactor(ST))
                   .setMIFlag(MachineInstr::FrameSetup);
    And->getOperand(3).setIsDead(); // Mark SCC as dead.
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If FP is used, emit the CSR spills with FP base register.
  if (HasFP) {
    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
                       FramePtrRegScratchCopy);
    if (FramePtrRegScratchCopy)
      LiveUnits.removeReg(FramePtrRegScratchCopy);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)
                   .addReg(StackPtrReg)
                   .addImm(RoundedSize * getScratchScaleFactor(ST))
                   .setMIFlag(MachineInstr::FrameSetup);
    Add->getOperand(3).setIsDead(); // Mark SCC as dead.
  }

  bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);
  (void)FPSaved;
  assert((!HasFP || FPSaved) &&
         "Needed to save FP but didn't save it anywhere");

  // If we allow spilling to AGPRs we may have saved FP but then spill
  // everything into AGPRs instead of the stack.
  assert((HasFP || !FPSaved || EnableSpillVGPRToAGPR) &&
         "Saved FP but didn't need it");

  bool BPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(BasePtrReg);
  (void)BPSaved;
  assert((!HasBP || BPSaved) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || !BPSaved) && "Saved BP but didn't need it");
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveRegUnits LiveUnits;
  // Get the insert location for the epilogue. If there were no terminators in
  // the block, get the last instruction.
  MachineBasicBlock::iterator MBBI = MBB.end();
  DebugLoc DL;
  if (!MBB.empty()) {
    MBBI = MBB.getLastNonDebugInstr();
    if (MBBI != MBB.end())
      DL = MBBI->getDebugLoc();

    MBBI = MBB.getFirstTerminator();
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = FuncInfo->isStackRealigned()
                             ? NumBytes + MFI.getMaxAlign().value()
                             : NumBytes;
  const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);

  if (RoundedSize != 0) {
    if (TRI.hasBasePointer(MF)) {
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), StackPtrReg)
          .addReg(TRI.getBaseRegister())
          .setMIFlag(MachineInstr::FrameDestroy);
    } else if (hasFP(MF)) {
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), StackPtrReg)
          .addReg(FramePtrReg)
          .setMIFlag(MachineInstr::FrameDestroy);
    }
  }

  Register FramePtrRegScratchCopy;
  Register SGPRForFPSaveRestoreCopy =
      FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
  if (FPSaved) {
    // CSR spill restores should use FP as base register. If
    // SGPRForFPSaveRestoreCopy is not set, restore the previous value of FP
    // into a new scratch register and copy to FP later when other registers
    // are restored from the current stack frame.
    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ false);
    if (SGPRForFPSaveRestoreCopy) {
      LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
    } else {
      FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
          MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
      if (!FramePtrRegScratchCopy)
        report_fatal_error("failed to find free scratch register");

      LiveUnits.addReg(FramePtrRegScratchCopy);
    }

    emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
                         FramePtrRegScratchCopy);
  }

  if (FPSaved) {
    // Insert the copy to restore FP.
    Register SrcReg = SGPRForFPSaveRestoreCopy ? SGPRForFPSaveRestoreCopy
                                               : FramePtrRegScratchCopy;
    assert(SrcReg);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(SrcReg);
    if (SGPRForFPSaveRestoreCopy)
      LiveUnits.removeReg(SGPRForFPSaveRestoreCopy);
  } else {
    // Insert the CSR spill restores with SP as the base register.
    emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits,
                         FuncInfo->isChainFunction() ? Register() : StackPtrReg,
                         FramePtrRegScratchCopy);
  }
}

#ifndef NDEBUG
static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) == TargetStackID::SGPRSpill &&
        !FuncInfo->checkIndexInPrologEpilogSGPRSpills(I)) {
      return false;
    }
  }

  return true;
}
#endif

StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                    int FI,
                                                    Register &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF,
    RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  const bool SpillVGPRToAGPR = ST.hasMAIInsts() && FuncInfo->hasSpilledVGPRs()
                               && EnableSpillVGPRToAGPR;

  if (SpillVGPRToAGPR) {
    // To track the spill frame indices handled in this pass.
    BitVector SpillFIs(MFI.getObjectIndexEnd(), false);
    BitVector NonVGPRSpillFIs(MFI.getObjectIndexEnd(), false);

    bool SeenDbgInstr = false;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
        int FrameIndex;
        if (MI.isDebugInstr())
          SeenDbgInstr = true;

        if (TII->isVGPRSpill(MI)) {
          // Try to eliminate stack used by VGPR spills before frame
          // finalization.
          unsigned FIOp = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                     AMDGPU::OpName::vaddr);
          int FI = MI.getOperand(FIOp).getIndex();
          Register VReg =
              TII->getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
          if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI,
                                                TRI->isAGPR(MRI, VReg))) {
            assert(RS != nullptr);
            RS->enterBasicBlockEnd(MBB);
            RS->backward(std::next(MI.getIterator()));
            TRI->eliminateFrameIndex(MI, 0, FIOp, RS);
            SpillFIs.set(FI);
            continue;
          }
        } else if (TII->isStoreToStackSlot(MI, FrameIndex) ||
                   TII->isLoadFromStackSlot(MI, FrameIndex))
          if (!MFI.isFixedObjectIndex(FrameIndex))
            NonVGPRSpillFIs.set(FrameIndex);
      }
    }

    // Stack slot coloring may assign different objects to the same stack slot.
    // If not, then the VGPR to AGPR spill slot is dead.
    for (unsigned FI : SpillFIs.set_bits())
      if (!NonVGPRSpillFIs.test(FI))
        FuncInfo->setVGPRToAGPRSpillDead(FI);

    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : FuncInfo->getVGPRSpillAGPRs())
        MBB.addLiveIn(Reg);

      for (MCPhysReg Reg : FuncInfo->getAGPRSpillVGPRs())
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();

      if (!SpillFIs.empty() && SeenDbgInstr) {
        // FIXME: The dead frame indices are replaced with a null register from
        // the debug value instructions. We should instead, update it with the
        // correct register value. But not sure the register value alone is
        // enough to lower the DIExpression. It should be worked out later.
        for (MachineInstr &MI : MBB) {
          if (MI.isDebugValue()) {
            uint32_t StackOperandIdx = MI.isDebugValueList() ? 2 : 0;
            if (MI.getOperand(StackOperandIdx).isFI() &&
                !MFI.isFixedObjectIndex(
                    MI.getOperand(StackOperandIdx).getIndex()) &&
                SpillFIs[MI.getOperand(StackOperandIdx).getIndex()]) {
              MI.getOperand(StackOperandIdx)
                  .ChangeToRegister(Register(), false /*isDef*/);
            }
          }
        }
      }
    }
  }

  // At this point we've already allocated all spilled SGPRs to VGPRs if we
  // can. Any remaining SGPR spills will go to memory, so move them back to the
  // default stack.
  bool HaveSGPRToVMemSpill =
      FuncInfo->removeDeadFrameIndices(MFI, /*ResetSGPRSpillStackIDs*/ true);
  assert(allSGPRSpillsAreDead(MF) &&
         "SGPR spill should have been removed in SILowerSGPRSpills");

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (!allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // Add an emergency spill slot
    RS->addScavengingFrameIndex(FuncInfo->getScavengeFI(MFI, *TRI));

    // If we are spilling SGPRs to memory with a large frame, we may need a
    // second VGPR emergency frame index.
    if (HaveSGPRToVMemSpill &&
        allocateScavengingFrameIndexesNearIncomingSP(MF)) {
      RS->addScavengingFrameIndex(MFI.CreateStackObject(4, Align(4), false));
    }
  }
}
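
// Sketch of the AGPR-spill rewrite above: a spill such as
//   SI_SPILL_V32_SAVE %vgpr0, %stack.0, ...
// whose frame index gets an AGPR assigned is rewritten in place by
// eliminateFrameIndex into a register-to-register copy (v_accvgpr_write /
// v_accvgpr_read on gfx908+) instead of a memory access, and %stack.0 is
// later marked dead if no other store/load keeps the slot alive.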

void SIFrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
    // On gfx908, we had initially reserved the highest available VGPR for AGPR
    // copy. Now since we are done with RA, check if there exists an unused
    // VGPR which is lower than the one reserved before RA. If one exists, use
    // it for the AGPR copy instead.
    Register VGPRForAGPRCopy = FuncInfo->getVGPRForAGPRCopy();
    Register UnusedLowVGPR =
        TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
    if (UnusedLowVGPR && (TRI->getHWRegIndex(UnusedLowVGPR) <
                          TRI->getHWRegIndex(VGPRForAGPRCopy))) {
      // Reserve this newly identified VGPR (for AGPR copy).
      // Reserved registers should already be frozen at this point
      // so we can avoid calling MRI.freezeReservedRegs and just use
      // MRI.reserveReg.
      FuncInfo->setVGPRForAGPRCopy(UnusedLowVGPR);
      MRI.reserveReg(UnusedLowVGPR, TRI);
    }
  }
  // We initially reserved the highest available SGPR pair for long branches;
  // now, after RA, we shift down to a lower unused one if one exists.
  Register LongBranchReservedReg = FuncInfo->getLongBranchReservedReg();
  Register UnusedLowSGPR =
      TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_64RegClass, MF);
  // If LongBranchReservedReg is null then we didn't find a long branch
  // and never reserved a register to begin with, so there is nothing to
  // shift down. Then if UnusedLowSGPR is null, there isn't an available lower
  // register to use, so just keep the original one we set.
  if (LongBranchReservedReg && UnusedLowSGPR) {
    FuncInfo->setLongBranchReservedReg(UnusedLowSGPR);
    MRI.reserveReg(UnusedLowSGPR, TRI);
  }
}
1607
1608// The special SGPR spills like the one needed for FP, BP or any reserved
1609// registers delayed until frame lowering.
1611 MachineFunction &MF, BitVector &SavedVGPRs,
1612 bool NeedExecCopyReservedReg) const {
1613 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
1616 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1617 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1618 LiveRegUnits LiveUnits;
1619 LiveUnits.init(*TRI);
1620 // Initially mark callee saved registers as used so we will not choose them
1621 // while looking for scratch SGPRs.
1622 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
1623 for (unsigned I = 0; CSRegs[I]; ++I)
1624 LiveUnits.addReg(CSRegs[I]);
1625
1626 const TargetRegisterClass &RC = *TRI->getWaveMaskRegClass();
1627
1628 Register ReservedRegForExecCopy = MFI->getSGPRForEXECCopy();
1629 if (NeedExecCopyReservedReg ||
1630 (ReservedRegForExecCopy &&
1631 MRI.isPhysRegUsed(ReservedRegForExecCopy, /*SkipRegMaskTest=*/true))) {
1632 MRI.reserveReg(ReservedRegForExecCopy, TRI);
1633 Register UnusedScratchReg = findUnusedRegister(MRI, LiveUnits, RC);
1634 if (UnusedScratchReg) {
1635 // If found any unused scratch SGPR, reserve the register itself for Exec
1636 // copy and there is no need for any spills in that case.
1637 MFI->setSGPRForEXECCopy(UnusedScratchReg);
1638 MRI.replaceRegWith(ReservedRegForExecCopy, UnusedScratchReg);
1639 LiveUnits.addReg(UnusedScratchReg);
1640 } else {
1641 // Needs spill.
1642 assert(!MFI->hasPrologEpilogSGPRSpillEntry(ReservedRegForExecCopy) &&
1643 "Re-reserving spill slot for EXEC copy register");
1644 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, ReservedRegForExecCopy, RC,
1645 /*IncludeScratchCopy=*/false);
1646 }
1647 } else if (ReservedRegForExecCopy) {
1648 // Reset it at this point, since no whole-wave copies or spills were
1649 // encountered.
1650 MFI->setSGPRForEXECCopy(AMDGPU::NoRegister);
1651 }
1652
1653 // hasFP only knows about stack objects that already exist. We're now
1654 // determining the stack slots that will be created, so we have to predict
1655 // them. Stack objects force FP usage with calls.
1656 //
1657 // Note a new VGPR CSR may be introduced if one is used for the spill, but we
1658 // don't want to report it here.
1659 //
1660 // FIXME: Is this really hasReservedCallFrame?
1661 const bool WillHaveFP =
1662 FrameInfo.hasCalls() &&
1663 (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));
1664
1665 if (WillHaveFP || hasFP(MF)) {
1666 Register FramePtrReg = MFI->getFrameOffsetReg();
1667 assert(!MFI->hasPrologEpilogSGPRSpillEntry(FramePtrReg) &&
1668 "Re-reserving spill slot for FP");
1669 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, FramePtrReg);
1670 }
1671
1672 if (TRI->hasBasePointer(MF)) {
1673 Register BasePtrReg = TRI->getBaseRegister();
1674 assert(!MFI->hasPrologEpilogSGPRSpillEntry(BasePtrReg) &&
1675 "Re-reserving spill slot for BP");
1676 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, BasePtrReg);
1677 }
1678}
1679
1680// Only report VGPRs to generic code.
1681 void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
1682 BitVector &SavedVGPRs,
1683 RegScavenger *RS) const {
1684 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1685
1686 // If this is a function with the amdgpu_cs_chain[_preserve] calling
1687 // convention and it doesn't contain any calls to llvm.amdgcn.cs.chain, then
1688 // we don't need to save and restore anything.
1689 if (MFI->isChainFunction() && !MF.getFrameInfo().hasTailCall())
1690 return;
1691
1692 TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS);
1693
1694 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1695 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1696 const SIInstrInfo *TII = ST.getInstrInfo();
1697 bool NeedExecCopyReservedReg = false;
1698
1699 MachineInstr *ReturnMI = nullptr;
1700 for (MachineBasicBlock &MBB : MF) {
1701 for (MachineInstr &MI : MBB) {
1702 // TODO: Walking through all MBBs here would be a bad heuristic. Better
1703 // handle them elsewhere.
1704 if (TII->isWWMRegSpillOpcode(MI.getOpcode()))
1705 NeedExecCopyReservedReg = true;
1706 else if (MI.getOpcode() == AMDGPU::SI_RETURN ||
1707 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
1708 MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
1709 (MFI->isChainFunction() &&
1710 TII->isChainCallOpcode(MI.getOpcode()))) {
1711 // We expect all returns to be the same size.
1712 assert(!ReturnMI ||
1713 (count_if(MI.operands(), [](auto Op) { return Op.isReg(); }) ==
1714 count_if(ReturnMI->operands(), [](auto Op) { return Op.isReg(); })));
1715 ReturnMI = &MI;
1716 }
1717 }
1718 }
1719
1720 SmallVector<Register> SortedWWMVGPRs;
1721 for (Register Reg : MFI->getWWMReservedRegs()) {
1722 // The shift-back is needed only for the VGPRs used for SGPR spills, and
1723 // those are 32-bit. The SIPreAllocateWWMRegs pass can add wider tuples to
1724 // the WWM reserved registers, so skip anything that is not 32-bit.
1725 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
1726 if (TRI->getRegSizeInBits(*RC) != 32)
1727 continue;
1728 SortedWWMVGPRs.push_back(Reg);
1729 }
1730
1731 sort(SortedWWMVGPRs, std::greater<Register>());
1732 MFI->shiftWwmVGPRsToLowestRange(MF, SortedWWMVGPRs, SavedVGPRs);
1733
1734 if (MFI->isEntryFunction())
1735 return;
1736
1737 if (MFI->isWholeWaveFunction()) {
1738 // In practice, all the VGPRs are WWM registers, and we will need to save at
1739 // least their inactive lanes. Add them to WWMReservedRegs.
1740 assert(!NeedExecCopyReservedReg &&
1741 "Whole wave functions can use the reg mapped for their i1 argument");
1742
1743 // FIXME: Be more efficient!
1744 unsigned NumArchVGPRs = ST.has1024AddressableVGPRs() ? 1024 : 256;
1745 for (MCRegister Reg :
1746 AMDGPU::VGPR_32RegClass.getRegisters().take_front(NumArchVGPRs))
1747 if (MF.getRegInfo().isPhysRegModified(Reg)) {
1748 MFI->reserveWWMRegister(Reg);
1749 MF.begin()->addLiveIn(Reg);
1750 }
1751 MF.begin()->sortUniqueLiveIns();
1752 }
1753
1754 // Remove any VGPRs used in the return value because these do not need to
1755 // be saved; this prevents the CSR restore from clobbering return VGPRs.
1756 if (ReturnMI) {
1757 for (auto &Op : ReturnMI->operands()) {
1758 if (Op.isReg())
1759 SavedVGPRs.reset(Op.getReg());
1760 }
1761 }
1762
1763 // Create the stack objects for WWM registers now.
1764 for (Register Reg : MFI->getWWMReservedRegs()) {
1765 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
1766 MFI->allocateWWMSpill(MF, Reg, TRI->getSpillSize(*RC),
1767 TRI->getSpillAlign(*RC));
1768 }
1769
1770 // Ignore the SGPRs the default implementation found.
1771 SavedVGPRs.clearBitsNotInMask(TRI->getAllVectorRegMask());
1772
1773 // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
1774 // On gfx908 there are no direct AGPR loads and stores, so spilling an AGPR
1775 // also requires a temporary VGPR.
1776 if (!ST.hasGFX90AInsts())
1777 SavedVGPRs.clearBitsInMask(TRI->getAllAGPRRegMask());
1778
1779 determinePrologEpilogSGPRSaves(MF, SavedVGPRs, NeedExecCopyReservedReg);
1780
1781 // The Whole-Wave VGPRs need to be specially inserted in the prolog, so don't
1782 // allow the default insertion to handle them.
1783 for (auto &Reg : MFI->getWWMSpills())
1784 SavedVGPRs.reset(Reg.first);
1785}
1786
1787 void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
1788 BitVector &SavedRegs,
1789 RegScavenger *RS) const {
1790 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1791
1792 if (MFI->isEntryFunction())
1793 return;
1794
1795 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1796 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1797
1798 // The SP is specifically managed and we don't want extra spills of it.
1799 SavedRegs.reset(MFI->getStackPtrOffsetReg());
1800
1801 const BitVector AllSavedRegs = SavedRegs;
1802 SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());
1803
1804 // We have to anticipate introducing CSR VGPR spills, or a spill of the
1805 // caller-saved VGPR reserved for SGPR spills, since we now always create a
1806 // stack entry for it even if there are no other stack objects: we require
1807 // an FP whenever there is both a call and a stack. We will allocate a VGPR
1808 // for SGPR spills if there are any SGPR spills, whether CSR spills or not.
1809 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
1810 const bool WillHaveFP =
1811 FrameInfo.hasCalls() && (AllSavedRegs.any() || MFI->hasSpilledSGPRs());
1812
1813 // FP will be specially managed like SP.
1814 if (WillHaveFP || hasFP(MF))
1815 SavedRegs.reset(MFI->getFrameOffsetReg());
1816
1817 // The return address use by the return instruction is hidden behind the
1818 // SI_RETURN pseudo. Given that, and since IPRA computes actual register
1819 // usage rather than using the CSR list, the clobbering of the return address
1820 // by function calls (D117243) or otherwise (D120922) is not seen by IPRA's
1821 // register usage collection. Setting these bits ensures the return address
1822 // is saved and restored in those scenarios.
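// (Illustrative: the return address is held in an SGPR pair, typically
// s[30:31], which is why both 32-bit halves are marked below.)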
1823 const MachineRegisterInfo &MRI = MF.getRegInfo();
1824 Register RetAddrReg = TRI->getReturnAddressReg(MF);
1825 if (!MFI->isEntryFunction() &&
1826 (FrameInfo.hasCalls() || MRI.isPhysRegModified(RetAddrReg))) {
1827 SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub0));
1828 SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub1));
1829 }
1830}
1831
1832 static void assignSlotsUsingVGPRBlocks(MachineFunction &MF,
1833 const GCNSubtarget &ST,
1834 std::vector<CalleeSavedInfo> &CSI,
1835 unsigned &MinCSFrameIndex,
1836 unsigned &MaxCSFrameIndex) {
1837 SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1838 MachineFrameInfo &MFI = MF.getFrameInfo();
1839 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1840
1841 assert(
1842 llvm::is_sorted(CSI,
1843 [](const CalleeSavedInfo &A, const CalleeSavedInfo &B) {
1844 return A.getReg() < B.getReg();
1845 }) &&
1846 "Callee saved registers not sorted");
1847
1848 auto CanUseBlockOps = [&](const CalleeSavedInfo &CSI) {
1849 return !CSI.isSpilledToReg() &&
1850 TRI->getPhysRegBaseClass(CSI.getReg()) == &AMDGPU::VGPR_32RegClass &&
1851 !FuncInfo->isWWMReservedRegister(CSI.getReg());
1852 };
1853
1854 auto CSEnd = CSI.end();
1855 for (auto CSIt = CSI.begin(); CSIt != CSEnd; ++CSIt) {
1856 Register Reg = CSIt->getReg();
1857 if (!CanUseBlockOps(*CSIt))
1858 continue;
1859
1860 // Find all the regs that will fit in a 32-bit mask starting at the current
1861 // reg and build said mask. It should have 1 for every register that's
1862 // included, with the current register as the least significant bit.
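// Illustrative example: if the eligible CSRs are v40, v41 and v43, then
// starting at Reg = v40 the loop below builds Mask = 0b1011 (bit 0 = v40,
// bit 1 = v41, bit 3 = v43).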
1863 uint32_t Mask = 1;
1864 CSEnd = std::remove_if(
1865 CSIt + 1, CSEnd, [&](const CalleeSavedInfo &CSI) -> bool {
1866 if (CanUseBlockOps(CSI) && CSI.getReg() < Reg + 32) {
1867 Mask |= 1 << (CSI.getReg() - Reg);
1868 return true;
1869 } else {
1870 return false;
1871 }
1872 });
1873
1874 const TargetRegisterClass *BlockRegClass = TRI->getRegClassForBlockOp(MF);
1875 Register RegBlock =
1876 TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, BlockRegClass);
1877 if (!RegBlock) {
1878 // We couldn't find a super register for the block. This can happen if
1879 // the register we started with is too high (e.g. v232 if the maximum is
1880 // v255). We therefore try to get the last register block and figure out
1881 // the mask from there.
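// Illustrative example: for Reg = v232 the enclosing 32-register block
// starts at v224, so RegDelta = 8 and the mask built relative to v232 is
// shifted left by 8 to become relative to v224.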
1882 Register LastBlockStart =
1883 AMDGPU::VGPR0 + alignDown(Reg - AMDGPU::VGPR0, 32);
1884 RegBlock =
1885 TRI->getMatchingSuperReg(LastBlockStart, AMDGPU::sub0, BlockRegClass);
1886 assert(RegBlock && TRI->isSubRegister(RegBlock, Reg) &&
1887 "Couldn't find super register");
1888 int RegDelta = Reg - LastBlockStart;
1889 assert(RegDelta > 0 && llvm::countl_zero(Mask) >= RegDelta &&
1890 "Bad shift amount");
1891 Mask <<= RegDelta;
1892 }
1893
1894 FuncInfo->setMaskForVGPRBlockOps(RegBlock, Mask);
1895
1896 // The stack objects can be a bit smaller than the register block if we know
1897 // some of the high bits of Mask are 0. This may happen often with calling
1898 // conventions where the caller and callee-saved VGPRs are interleaved at
1899 // a small boundary (e.g. 8 or 16).
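// Illustrative arithmetic: a full 32-register block occupies 32 x 4 = 128
// bytes per lane; if the top 24 bits of Mask are known zero, only
// 128 - 24 * 4 = 32 bytes are needed.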
1900 int UnusedBits = llvm::countl_zero(Mask);
1901 unsigned BlockSize = TRI->getSpillSize(*BlockRegClass) - UnusedBits * 4;
1902 int FrameIdx =
1903 MFI.CreateStackObject(BlockSize, TRI->getSpillAlign(*BlockRegClass),
1904 /*isSpillSlot=*/true);
1905 if ((unsigned)FrameIdx < MinCSFrameIndex)
1906 MinCSFrameIndex = FrameIdx;
1907 if ((unsigned)FrameIdx > MaxCSFrameIndex)
1908 MaxCSFrameIndex = FrameIdx;
1909
1910 CSIt->setFrameIdx(FrameIdx);
1911 CSIt->setReg(RegBlock);
1912 }
1913 CSI.erase(CSEnd, CSI.end());
1914}
1915
1916 bool SIFrameLowering::assignCalleeSavedSpillSlots(
1917 MachineFunction &MF, const TargetRegisterInfo *TRI,
1918 std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
1919 unsigned &MaxCSFrameIndex) const {
1920 if (CSI.empty())
1921 return true; // Early exit if no callee saved registers are modified!
1922
1923 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1924 bool UseVGPRBlocks = ST.useVGPRBlockOpsForCSR();
1925
1926 if (UseVGPRBlocks)
1927 assignSlotsUsingVGPRBlocks(MF, ST, CSI, MinCSFrameIndex, MaxCSFrameIndex);
1928
1929 return assignCalleeSavedSpillSlots(MF, TRI, CSI) || UseVGPRBlocks;
1930}
1931
1932 bool SIFrameLowering::assignCalleeSavedSpillSlots(
1933 MachineFunction &MF, const TargetRegisterInfo *TRI,
1934 std::vector<CalleeSavedInfo> &CSI) const {
1935 if (CSI.empty())
1936 return true; // Early exit if no callee saved registers are modified!
1937
1938 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1939 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1940 const SIRegisterInfo *RI = ST.getRegisterInfo();
1941 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1942 Register BasePtrReg = RI->getBaseRegister();
1943 Register SGPRForFPSaveRestoreCopy =
1944 FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
1945 Register SGPRForBPSaveRestoreCopy =
1946 FuncInfo->getScratchSGPRCopyDstReg(BasePtrReg);
1947 if (!SGPRForFPSaveRestoreCopy && !SGPRForBPSaveRestoreCopy)
1948 return false;
1949
1950 unsigned NumModifiedRegs = 0;
1951
1952 if (SGPRForFPSaveRestoreCopy)
1953 NumModifiedRegs++;
1954 if (SGPRForBPSaveRestoreCopy)
1955 NumModifiedRegs++;
1956
1957 for (auto &CS : CSI) {
1958 if (CS.getReg() == FramePtrReg.asMCReg() && SGPRForFPSaveRestoreCopy) {
1959 CS.setDstReg(SGPRForFPSaveRestoreCopy);
1960 if (--NumModifiedRegs == 0)
1961 break;
1962 } else if (CS.getReg() == BasePtrReg.asMCReg() &&
1963 SGPRForBPSaveRestoreCopy) {
1964 CS.setDstReg(SGPRForBPSaveRestoreCopy);
1965 if (--NumModifiedRegs == 0)
1966 break;
1967 }
1968 }
1969
1970 return false;
1971}
1972
1973 bool SIFrameLowering::allocateScavengingFrameIndexesNearIncomingSP(
1974 const MachineFunction &MF) const {
1975
1976 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1977 const MachineFrameInfo &MFI = MF.getFrameInfo();
1978 const SIInstrInfo *TII = ST.getInstrInfo();
1979 uint64_t EstStackSize = MFI.estimateStackSize(MF);
1980 uint64_t MaxOffset = EstStackSize - 1;
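// Illustrative reasoning: the farthest byte an emergency slot could need to
// reach is the last byte of the estimated frame, hence EstStackSize - 1.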
1981
1982 // We need the emergency stack slots to be allocated in range of the
1983 // MUBUF/flat scratch immediate offset from the base register, so assign these
1984 // first at the incoming SP position.
1985 //
1986 // TODO: We could try sorting the objects to find a hole in the first bytes
1987 // rather than allocating as close as possible. This could save a lot of
1988 // space on frames with alignment requirements.
1989 if (ST.enableFlatScratch()) {
1990 if (TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1991 SIInstrFlags::FlatScratch))
1992 return false;
1993 } else {
1994 if (TII->isLegalMUBUFImmOffset(MaxOffset))
1995 return false;
1996 }
1997
1998 return true;
1999}
2000
2001 bool SIFrameLowering::spillCalleeSavedRegisters(
2002 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2003 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2004 MachineFunction *MF = MBB.getParent();
2005 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2006 if (!ST.useVGPRBlockOpsForCSR())
2007 return false;
2008
2009 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
2010 SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
2011 const SIInstrInfo *TII = ST.getInstrInfo();
2013
2014 const TargetRegisterClass *BlockRegClass =
2015 static_cast<const SIRegisterInfo *>(TRI)->getRegClassForBlockOp(*MF);
2016 for (const CalleeSavedInfo &CS : CSI) {
2017 Register Reg = CS.getReg();
2018 if (!BlockRegClass->contains(Reg) ||
2019 !FuncInfo->hasMaskForVGPRBlockOps(Reg)) {
2020 spillCalleeSavedRegister(MBB, MI, CS, TII, TRI);
2021 continue;
2022 }
2023
2024 // Build a scratch block store.
2025 uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(Reg);
2026 int FrameIndex = CS.getFrameIdx();
2027 MachinePointerInfo PtrInfo =
2028 MachinePointerInfo::getFixedStack(*MF, FrameIndex);
2029 MachineMemOperand *MMO =
2030 MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
2031 FrameInfo.getObjectSize(FrameIndex),
2032 FrameInfo.getObjectAlign(FrameIndex));
2033
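// (Illustrative operand layout for the block-spill pseudo: the VGPR block,
// the frame index, the SP base register, a 0 offset, the lane mask, and the
// memory operand.)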
2034 BuildMI(MBB, MI, MI->getDebugLoc(),
2035 TII->get(AMDGPU::SI_BLOCK_SPILL_V1024_SAVE))
2036 .addReg(Reg, getKillRegState(false))
2037 .addFrameIndex(FrameIndex)
2038 .addReg(FuncInfo->getStackPtrOffsetReg())
2039 .addImm(0)
2040 .addImm(Mask)
2041 .addMemOperand(MMO);
2042
2043 FuncInfo->setHasSpilledVGPRs();
2044
2045 // Add the register to the liveins. This is necessary because if any of the
2046 // VGPRs in the register block is reserved (e.g. if it's a WWM register),
2047 // then the whole block will be marked as reserved and `updateLiveness` will
2048 // skip it.
2049 MBB.addLiveIn(Reg);
2050 }
2051 MBB.sortUniqueLiveIns();
2052
2053 return true;
2054}
2055
2056 bool SIFrameLowering::restoreCalleeSavedRegisters(
2057 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2058 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2059 MachineFunction *MF = MBB.getParent();
2060 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2061 if (!ST.useVGPRBlockOpsForCSR())
2062 return false;
2063
2064 SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
2065 MachineFrameInfo &MFI = MF->getFrameInfo();
2066 const SIInstrInfo *TII = ST.getInstrInfo();
2067 const SIRegisterInfo *SITRI = static_cast<const SIRegisterInfo *>(TRI);
2068 const TargetRegisterClass *BlockRegClass = SITRI->getRegClassForBlockOp(*MF);
2069 for (const CalleeSavedInfo &CS : reverse(CSI)) {
2070 Register Reg = CS.getReg();
2071 if (!BlockRegClass->contains(Reg) ||
2072 !FuncInfo->hasMaskForVGPRBlockOps(Reg)) {
2073 restoreCalleeSavedRegister(MBB, MI, CS, TII, TRI);
2074 continue;
2075 }
2076
2077 // Build a scratch block load.
2078 uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(Reg);
2079 int FrameIndex = CS.getFrameIdx();
2080 MachinePointerInfo PtrInfo =
2081 MachinePointerInfo::getFixedStack(*MF, FrameIndex);
2082 MachineMemOperand *MMO = MF->getMachineMemOperand(
2083 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
2084 MFI.getObjectAlign(FrameIndex));
2085
2086 auto MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
2087 TII->get(AMDGPU::SI_BLOCK_SPILL_V1024_RESTORE), Reg)
2088 .addFrameIndex(FrameIndex)
2089 .addReg(FuncInfo->getStackPtrOffsetReg())
2090 .addImm(0)
2091 .addImm(Mask)
2092 .addMemOperand(MMO);
2093 SITRI->addImplicitUsesForBlockCSRLoad(MIB, Reg);
2094
2095 // Add the register to the liveins. This is necessary because if any of the
2096 // VGPRs in the register block is reserved (e.g. if it's a WWM register),
2097 // then the whole block will be marked as reserved and `updateLiveness` will
2098 // skip it.
2099 MBB.addLiveIn(Reg);
2100 }
2101
2102 MBB.sortUniqueLiveIns();
2103 return true;
2104}
2105
2106 MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
2107 MachineFunction &MF,
2108 MachineBasicBlock &MBB,
2109 MachineBasicBlock::iterator I) const {
2110 int64_t Amount = I->getOperand(0).getImm();
2111 if (Amount == 0)
2112 return MBB.erase(I);
2113
2114 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
2115 const SIInstrInfo *TII = ST.getInstrInfo();
2116 const DebugLoc &DL = I->getDebugLoc();
2117 unsigned Opc = I->getOpcode();
2118 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
2119 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
2120
2121 if (!hasReservedCallFrame(MF)) {
2122 Amount = alignTo(Amount, getStackAlign());
2123 assert(isUInt<32>(Amount) && "exceeded stack address space size");
2124 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2125 Register SPReg = MFI->getStackPtrOffsetReg();
2126
2127 Amount *= getScratchScaleFactor(ST);
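// Note (illustrative): with MUBUF scratch, SP holds a per-wave byte offset,
// so the per-lane Amount is scaled by the wavefront size; with flat scratch
// the scale factor is 1 and Amount is used as-is.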
2128 if (IsDestroy)
2129 Amount = -Amount;
2130 auto Add = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SPReg)
2131 .addReg(SPReg)
2132 .addImm(Amount);
2133 Add->getOperand(3).setIsDead(); // Mark SCC as dead.
2134 } else if (CalleePopAmount != 0) {
2135 llvm_unreachable("is this used?");
2136 }
2137
2138 return MBB.erase(I);
2139}
2140
2141/// Returns true if the frame will require a reference to the stack pointer.
2142///
2143/// This is the set of conditions common to setting up the stack pointer in a
2144/// kernel, and for using a frame pointer in a callable function.
2145///
2146/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
2147/// references SP.
2148 static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
2149 return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
2150}
2151
2152 // The FP for kernels is always known to be 0, so we never really need to set
2153 // up an explicit register for it. However, DisableFramePointerElim will force us to
2154// use a register for it.
2155 bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
2156 const MachineFrameInfo &MFI = MF.getFrameInfo();
2157
2158 // For entry & chain functions we can use an immediate offset in most cases,
2159 // so the presence of calls doesn't imply we need a distinct frame pointer.
2160 if (MFI.hasCalls() &&
2161 !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
2162 !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) {
2163 // All offsets are unsigned, so need to be addressed in the same direction
2164 // as stack growth.
2165
2166 // FIXME: This function is pretty broken, since it can be called before the
2167 // frame layout is determined or CSR spills are inserted.
2168 return MFI.getStackSize() != 0;
2169 }
2170
2171 return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
2172 MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
2173 MF) ||
2174 mayReserveScratchForCWSR(MF) ||
2175 MF.getTarget().Options.DisableFramePointerElim(MF);
2176}
2177
2178 bool SIFrameLowering::mayReserveScratchForCWSR(
2179 const MachineFunction &MF) const {
2180 return MF.getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled() &&
2181 AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv()) &&
2182 AMDGPU::isCompute(MF.getFunction().getCallingConv());
2183}
2184
2185// This is essentially a reduced version of hasFP for entry functions. Since the
2186 // stack pointer is known to be 0 on entry to kernels, we never really need an FP
2187// register. We may need to initialize the stack pointer depending on the frame
2188// properties, which logically overlaps many of the cases where an ordinary
2189// function would require an FP.
2190// Also used for chain functions. While not technically entry functions, chain
2191// functions may need to set up a stack pointer in some situations.
2192 bool SIFrameLowering::requiresStackPointerReference(
2193 const MachineFunction &MF) const {
2194 // Callable functions always require a stack pointer reference.
2195 assert((MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() ||
2196 MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) &&
2197 "only expected to call this for entry points and chain functions");
2198
2199 const MachineFrameInfo &MFI = MF.getFrameInfo();
2200
2201 // Entry points ordinarily don't need to initialize SP. We have to set it up
2202 // for callees if there are any. Also note tail calls are impossible/don't
2203 // make any sense for kernels.
2204 if (MFI.hasCalls())
2205 return true;
2206
2207 // We still need to initialize the SP if we're doing anything weird that
2208 // references the SP, like variable sized stack objects.
2209 return frameTriviallyRequiresSP(MFI);
2210}