AMDGPUCallLowering.cpp
1//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the lowering of LLVM calls to machine code calls for
11/// GlobalISel.
12///
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPUCallLowering.h"
16#include "AMDGPU.h"
17#include "AMDGPULegalizerInfo.h"
19#include "SIRegisterInfo.h"
24#include "llvm/IR/IntrinsicsAMDGPU.h"
25
26#define DEBUG_TYPE "amdgpu-call-lowering"
27
28using namespace llvm;
29
30namespace {
31
32/// Wrapper around extendRegister to ensure we extend to a full 32-bit register.
33static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
34 Register ValVReg, const CCValAssign &VA) {
35 if (VA.getLocVT().getSizeInBits() < 32) {
36 // 16-bit types are reported as legal for 32-bit registers. We need to
37 // extend and do a 32-bit copy to avoid the verifier complaining about it.
38 return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
39 }
40
41 return Handler.extendRegister(ValVReg, VA);
42}
43
44struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
45 AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
46 MachineInstrBuilder MIB)
47 : OutgoingValueHandler(B, MRI), MIB(MIB) {}
48
49 MachineInstrBuilder MIB;
50
51 Register getStackAddress(uint64_t Size, int64_t Offset,
52 MachinePointerInfo &MPO,
53 ISD::ArgFlagsTy Flags) override {
54 llvm_unreachable("not implemented");
55 }
56
57 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
58 const MachinePointerInfo &MPO,
59 const CCValAssign &VA) override {
60 llvm_unreachable("not implemented");
61 }
62
63 void assignValueToReg(Register ValVReg, Register PhysReg,
64 const CCValAssign &VA) override {
65 Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
66
67 // If this is a scalar return, insert a readfirstlane just in case the value
68 // ends up in a VGPR.
69 // FIXME: Assert this is a shader return.
70 const SIRegisterInfo *TRI
71 = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
72 if (TRI->isSGPRReg(MRI, PhysReg)) {
73 LLT Ty = MRI.getType(ExtReg);
74 LLT S32 = LLT::scalar(32);
75 if (Ty != S32) {
76 // FIXME: We should probably support readfirstlane intrinsics with all
77 // legal 32-bit types.
78 assert(Ty.getSizeInBits() == 32);
79 if (Ty.isPointer())
80 ExtReg = MIRBuilder.buildPtrToInt(S32, ExtReg).getReg(0);
81 else
82 ExtReg = MIRBuilder.buildBitcast(S32, ExtReg).getReg(0);
83 }
84
85 auto ToSGPR = MIRBuilder
86 .buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
87 {MRI.getType(ExtReg)})
88 .addReg(ExtReg);
89 ExtReg = ToSGPR.getReg(0);
90 }
91
92 MIRBuilder.buildCopy(PhysReg, ExtReg);
93 MIB.addUse(PhysReg, RegState::Implicit);
94 }
95};
96
97struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
98 uint64_t StackUsed = 0;
99
100 AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
101 : IncomingValueHandler(B, MRI) {}
102
103 Register getStackAddress(uint64_t Size, int64_t Offset,
104 MachinePointerInfo &MPO,
105 ISD::ArgFlagsTy Flags) override {
106 auto &MFI = MIRBuilder.getMF().getFrameInfo();
107
108 // Byval is assumed to be writable memory, but other stack passed arguments
109 // are not.
110 const bool IsImmutable = !Flags.isByVal();
111 int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
112 MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
113 auto AddrReg = MIRBuilder.buildFrameIndex(
114 LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
115 StackUsed = std::max(StackUsed, Size + Offset);
116 return AddrReg.getReg(0);
117 }
118
119 void assignValueToReg(Register ValVReg, Register PhysReg,
120 const CCValAssign &VA) override {
121 markPhysRegUsed(PhysReg);
122
123 if (VA.getLocVT().getSizeInBits() < 32) {
124 // 16-bit types are reported as legal for 32-bit registers. We need to do
125 // a 32-bit copy, and truncate to avoid the verifier complaining about it.
126 auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
127
128 // If we have signext/zeroext, it applies to the whole 32-bit register
129 // before truncation.
130 auto Extended =
131 buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
132 MIRBuilder.buildTrunc(ValVReg, Extended);
133 return;
134 }
135
136 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
137 }
138
139 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
140 const MachinePointerInfo &MPO,
141 const CCValAssign &VA) override {
142 MachineFunction &MF = MIRBuilder.getMF();
143
144 auto *MMO = MF.getMachineMemOperand(
145 MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
146 inferAlignFromPtrInfo(MF, MPO));
147 MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
148 }
149
150 /// How the physical register gets marked varies between formal
151 /// parameters (it's a basic-block live-in), and a call instruction
152 /// (it's an implicit-def of the call instruction).
153 virtual void markPhysRegUsed(unsigned PhysReg) = 0;
154};
155
156struct FormalArgHandler : public AMDGPUIncomingArgHandler {
157 FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
158 : AMDGPUIncomingArgHandler(B, MRI) {}
159
160 void markPhysRegUsed(unsigned PhysReg) override {
161 MIRBuilder.getMBB().addLiveIn(PhysReg);
162 }
163};
164
165struct CallReturnHandler : public AMDGPUIncomingArgHandler {
166 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
167 MachineInstrBuilder MIB)
168 : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
169
170 void markPhysRegUsed(unsigned PhysReg) override {
171 MIB.addDef(PhysReg, RegState::Implicit);
172 }
173
174 MachineInstrBuilder MIB;
175};
176
177struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
178 /// For tail calls, the byte offset of the call's argument area from the
179 /// callee's. Unused elsewhere.
180 int FPDiff;
181
182 // Cache the SP register vreg if we need it more than once in this call site.
183 Register SPReg;
184
185 bool IsTailCall;
186
187 AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
188 MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
189 bool IsTailCall = false, int FPDiff = 0)
190 : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
191 IsTailCall(IsTailCall) {}
192
193 Register getStackAddress(uint64_t Size, int64_t Offset,
194 MachinePointerInfo &MPO,
195 ISD::ArgFlagsTy Flags) override {
196 MachineFunction &MF = MIRBuilder.getMF();
197 const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
198 const LLT S32 = LLT::scalar(32);
199
200 if (IsTailCall) {
201 Offset += FPDiff;
202 int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
203 auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
204 MPO = MachinePointerInfo::getFixedStack(MF, FI);
205 return FIReg.getReg(0);
206 }
207
208 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
209
210 if (!SPReg) {
211 const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
212 if (ST.enableFlatScratch()) {
213 // The stack is accessed unswizzled, so we can use a regular copy.
214 SPReg = MIRBuilder.buildCopy(PtrTy,
215 MFI->getStackPtrOffsetReg()).getReg(0);
216 } else {
217 // The address we produce here, without knowing the use context, is going
218 // to be interpreted as a vector address, so we need to convert to a
219 // swizzled address.
220 SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
221 {MFI->getStackPtrOffsetReg()}).getReg(0);
222 }
223 }
224
225 auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);
226
227 auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
228 MPO = MachinePointerInfo::getStack(MF, Offset);
229 return AddrReg.getReg(0);
230 }
231
232 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
233 const MachinePointerInfo &MPO,
234 const CCValAssign &VA) override {
235 MachineFunction &MF = MIRBuilder.getMF();
236 uint64_t LocMemOffset = VA.getLocMemOffset();
237 const auto &ST = MF.getSubtarget<GCNSubtarget>();
238
239 auto *MMO = MF.getMachineMemOperand(
240 MPO, MachineMemOperand::MOStore, MemTy,
241 commonAlignment(ST.getStackAlignment(), LocMemOffset));
242 MIRBuilder.buildStore(ValVReg, Addr, *MMO);
243 }
244
245 void assignValueToAddress(const CallLowering::ArgInfo &Arg,
246 unsigned ValRegIndex, Register Addr, LLT MemTy,
247 const MachinePointerInfo &MPO,
248 const CCValAssign &VA) override {
249 Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FullValue
250 ? extendRegister(Arg.Regs[ValRegIndex], VA)
251 : Arg.Regs[ValRegIndex];
252 assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
253 }
254};
255} // anonymous namespace
256
257AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
258 : CallLowering(&TLI) {}
259
260
261// FIXME: Compatibility shim
262static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
263 switch (MIOpc) {
264 case TargetOpcode::G_SEXT:
265 return ISD::SIGN_EXTEND;
266 case TargetOpcode::G_ZEXT:
267 return ISD::ZERO_EXTEND;
268 case TargetOpcode::G_ANYEXT:
269 return ISD::ANY_EXTEND;
270 default:
271 llvm_unreachable("not an extend opcode");
272 }
273}
274
275bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
276 CallingConv::ID CallConv,
277 SmallVectorImpl<BaseArgInfo> &Outs,
278 bool IsVarArg) const {
279 // For shaders. Vector types should be explicitly handled by CC.
280 if (AMDGPU::isEntryFunctionCC(CallConv))
281 return true;
282
283 SmallVector<CCValAssign, 16> ArgLocs;
284 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
285 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
286 MF.getFunction().getContext());
287
288 return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
289}
290
291/// Lower the return value for the already existing \p Ret. This assumes that
292/// \p B's insertion point is correct.
293bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
294 const Value *Val, ArrayRef<Register> VRegs,
295 MachineInstrBuilder &Ret) const {
296 if (!Val)
297 return true;
298
299 auto &MF = B.getMF();
300 const auto &F = MF.getFunction();
301 const DataLayout &DL = MF.getDataLayout();
302 MachineRegisterInfo *MRI = B.getMRI();
303 LLVMContext &Ctx = F.getContext();
304
305 CallingConv::ID CC = F.getCallingConv();
306 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
307
308 SmallVector<EVT, 8> SplitEVTs;
309 ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
310 assert(VRegs.size() == SplitEVTs.size() &&
311 "For each split Type there should be exactly one VReg.");
312
313 SmallVector<ArgInfo, 8> SplitRetInfos;
314
315 for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
316 EVT VT = SplitEVTs[i];
317 Register Reg = VRegs[i];
318 ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
319 setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
320
321 if (VT.isScalarInteger()) {
322 unsigned ExtendOp = TargetOpcode::G_ANYEXT;
323 if (RetInfo.Flags[0].isSExt()) {
324 assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
325 ExtendOp = TargetOpcode::G_SEXT;
326 } else if (RetInfo.Flags[0].isZExt()) {
327 assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
328 ExtendOp = TargetOpcode::G_ZEXT;
329 }
330
331 EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
332 extOpcodeToISDExtOpcode(ExtendOp));
333 if (ExtVT != VT) {
334 RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
335 LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
336 Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
337 }
338 }
339
340 if (Reg != RetInfo.Regs[0]) {
341 RetInfo.Regs[0] = Reg;
342 // Reset the arg flags after modifying Reg.
343 setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
344 }
345
346 splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
347 }
348
349 CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
350
351 OutgoingValueAssigner Assigner(AssignFn);
352 AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
353 return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
354 CC, F.isVarArg());
355}
356
357bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
358 ArrayRef<Register> VRegs,
359 FunctionLoweringInfo &FLI) const {
360
361 MachineFunction &MF = B.getMF();
362 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
363 MFI->setIfReturnsVoid(!Val);
364
365 assert(!Val == VRegs.empty() && "Return value without a vreg");
366
367 CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
368 const bool IsShader = AMDGPU::isShader(CC);
369 const bool IsWaveEnd =
370 (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
371 if (IsWaveEnd) {
372 B.buildInstr(AMDGPU::S_ENDPGM)
373 .addImm(0);
374 return true;
375 }
376
377 const bool IsWholeWave = MFI->isWholeWaveFunction();
378 unsigned ReturnOpc = IsWholeWave ? AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_RETURN
379 : IsShader ? AMDGPU::SI_RETURN_TO_EPILOG
380 : AMDGPU::SI_RETURN;
381 auto Ret = B.buildInstrNoInsert(ReturnOpc);
382
383 if (!FLI.CanLowerReturn)
384 insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
385 else if (!lowerReturnVal(B, Val, VRegs, Ret))
386 return false;
387
388 if (IsWholeWave)
389 addOriginalExecToReturn(B.getMF(), Ret);
390
391 // TODO: Handle CalleeSavedRegsViaCopy.
392
393 B.insertInstr(Ret);
394 return true;
395}
396
397void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
398 uint64_t Offset) const {
399 MachineFunction &MF = B.getMF();
400 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
401 MachineRegisterInfo &MRI = MF.getRegInfo();
402 Register KernArgSegmentPtr =
403 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
404 Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);
405
406 auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);
407
408 B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
409}
410
411void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
412 uint64_t Offset,
413 Align Alignment) const {
414 MachineFunction &MF = B.getMF();
415 const Function &F = MF.getFunction();
416 const DataLayout &DL = F.getDataLayout();
417 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
418
419 LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
420
421 SmallVector<ArgInfo, 32> SplitArgs;
422 SmallVector<uint64_t> FieldOffsets;
423 splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);
424
425 unsigned Idx = 0;
426 for (ArgInfo &SplitArg : SplitArgs) {
427 Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
428 lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);
429
430 LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
431 if (SplitArg.Flags[0].isPointer()) {
432 // Compensate for losing pointeriness in splitValueTypes.
433 LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
434 ArgTy.getScalarSizeInBits());
435 ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
436 : PtrTy;
437 }
438
439 MachineMemOperand *MMO = MF.getMachineMemOperand(
440 PtrInfo,
441 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
442 MachineMemOperand::MOInvariant,
443 ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));
444
445 assert(SplitArg.Regs.size() == 1);
446
447 B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
448 ++Idx;
449 }
450}
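// Illustrative aside (not part of the original source): a standalone sketch of
// the per-field address and alignment bookkeeping in lowerParameter above,
// assuming a struct {i32, i64} kernel argument at kernarg offset 8 and the
// usual 16-byte kernarg base alignment. commonAlignment is re-derived here
// with plain integers; the field offsets stand in for what splitToValueTypes
// would report.
#include <algorithm>
#include <cassert>
#include <cstdint>

static uint64_t largestPow2Divisor(uint64_t N) { return N & (~N + 1); }

// commonAlignment(A, Offset): the largest power of two dividing both.
static uint64_t commonAlign(uint64_t A, uint64_t Offset) {
  return Offset == 0 ? A : std::min(A, largestPow2Divisor(Offset));
}

static void splitKernargLoadExample() {
  const uint64_t ArgOffset = 8;           // byte offset of the whole argument
  const uint64_t FieldOffsets[] = {0, 8}; // i32 at +0, i64 at +8
  const uint64_t ArgAlign = commonAlign(16, ArgOffset); // 8

  for (uint64_t FieldOff : FieldOffsets) {
    uint64_t LoadOffset = ArgOffset + FieldOff; // added to the kernarg base
    uint64_t LoadAlign = commonAlign(ArgAlign, FieldOff);
    assert(LoadAlign >= 4 && LoadOffset % LoadAlign == 0);
  }
}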
451
452// Allocate special inputs passed in user SGPRs.
453static void allocateHSAUserSGPRs(CCState &CCInfo,
454 MachineIRBuilder &B,
455 MachineFunction &MF,
456 const SIRegisterInfo &TRI,
457 SIMachineFunctionInfo &Info) {
458 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
459 const GCNUserSGPRUsageInfo &UserSGPRInfo = Info.getUserSGPRInfo();
460 if (UserSGPRInfo.hasPrivateSegmentBuffer()) {
461 Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
462 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
463 CCInfo.AllocateReg(PrivateSegmentBufferReg);
464 }
465
466 if (UserSGPRInfo.hasDispatchPtr()) {
467 Register DispatchPtrReg = Info.addDispatchPtr(TRI);
468 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
469 CCInfo.AllocateReg(DispatchPtrReg);
470 }
471
472 if (UserSGPRInfo.hasQueuePtr()) {
473 Register QueuePtrReg = Info.addQueuePtr(TRI);
474 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
475 CCInfo.AllocateReg(QueuePtrReg);
476 }
477
478 if (UserSGPRInfo.hasKernargSegmentPtr()) {
479 MachineRegisterInfo &MRI = MF.getRegInfo();
480 Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
481 const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
482 Register VReg = MRI.createGenericVirtualRegister(P4);
483 MRI.addLiveIn(InputPtrReg, VReg);
484 B.getMBB().addLiveIn(InputPtrReg);
485 B.buildCopy(VReg, InputPtrReg);
486 CCInfo.AllocateReg(InputPtrReg);
487 }
488
489 if (UserSGPRInfo.hasDispatchID()) {
490 Register DispatchIDReg = Info.addDispatchID(TRI);
491 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
492 CCInfo.AllocateReg(DispatchIDReg);
493 }
494
495 if (UserSGPRInfo.hasFlatScratchInit()) {
496 Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
497 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
498 CCInfo.AllocateReg(FlatScratchInitReg);
499 }
500
501 if (UserSGPRInfo.hasPrivateSegmentSize()) {
502 Register PrivateSegmentSizeReg = Info.addPrivateSegmentSize(TRI);
503 MF.addLiveIn(PrivateSegmentSizeReg, &AMDGPU::SGPR_32RegClass);
504 CCInfo.AllocateReg(PrivateSegmentSizeReg);
505 }
506
507 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
508 // these from the dispatch pointer.
509}
510
511bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
512 MachineIRBuilder &B, const Function &F,
513 ArrayRef<ArrayRef<Register>> VRegs) const {
514 MachineFunction &MF = B.getMF();
515 const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
516 MachineRegisterInfo &MRI = MF.getRegInfo();
517 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
518 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
519 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
520 const DataLayout &DL = F.getDataLayout();
521
522 SmallVector<CCValAssign, 16> ArgLocs;
523 CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
524
525 allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);
526
527 unsigned i = 0;
528 const Align KernArgBaseAlign(16);
529 const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset();
530 uint64_t ExplicitArgOffset = 0;
531
532 // TODO: Align down to dword alignment and extract bits for extending loads.
533 for (auto &Arg : F.args()) {
534 // TODO: Add support for kernarg preload.
535 if (Arg.hasAttribute("amdgpu-hidden-argument")) {
536 LLVM_DEBUG(dbgs() << "Preloading hidden arguments is not supported\n");
537 return false;
538 }
539
540 const bool IsByRef = Arg.hasByRefAttr();
541 Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
542 unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
543 if (AllocSize == 0)
544 continue;
545
546 MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
547 Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);
548
549 uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
550 ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;
551
552 if (Arg.use_empty()) {
553 ++i;
554 continue;
555 }
556
557 Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);
558
559 if (IsByRef) {
560 unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();
561
562 assert(VRegs[i].size() == 1 &&
563 "expected only one register for byval pointers");
564 if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
565 lowerParameterPtr(VRegs[i][0], B, ArgOffset);
566 } else {
567 const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
568 Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
569 lowerParameterPtr(PtrReg, B, ArgOffset);
570
571 B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
572 }
573 } else {
574 ArgInfo OrigArg(VRegs[i], Arg, i);
575 const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
576 setArgFlags(OrigArg, OrigArgIdx, DL, F);
577 lowerParameter(B, OrigArg, ArgOffset, Alignment);
578 }
579
580 ++i;
581 }
582
583 if (Info->getNumKernargPreloadedSGPRs())
584 Info->setNumWaveDispatchSGPRs(Info->getNumUserSGPRs());
585
586 TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
587 TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
588 return true;
589}
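// Illustrative aside (not part of the original source): the explicit kernarg
// offset bookkeeping used in lowerFormalArgumentsKernel above, reduced to
// plain integer arithmetic. It assumes a base offset of 0 (what the
// subtarget's getExplicitKernelArgOffset() would return) and natural ABI
// alignments for the argument types.
#include <cassert>
#include <cstdint>

static uint64_t alignToPow2(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

static void kernargLayoutExample() {
  // Kernel signature sketch: (i32, i64, i16).
  uint64_t ExplicitArgOffset = 0;
  const uint64_t BaseOffset = 0;

  uint64_t Off0 = alignToPow2(ExplicitArgOffset, 4) + BaseOffset;  // i32
  ExplicitArgOffset = alignToPow2(ExplicitArgOffset, 4) + 4;

  uint64_t Off1 = alignToPow2(ExplicitArgOffset, 8) + BaseOffset;  // i64
  ExplicitArgOffset = alignToPow2(ExplicitArgOffset, 8) + 8;

  uint64_t Off2 = alignToPow2(ExplicitArgOffset, 2) + BaseOffset;  // i16
  assert(Off0 == 0 && Off1 == 8 && Off2 == 16);
  // The i64 load at offset 8 can only assume 8-byte alignment even though the
  // kernarg segment itself is 16-byte aligned (commonAlignment(16, 8) == 8).
}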
590
591bool AMDGPUCallLowering::lowerFormalArguments(
592 MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
593 FunctionLoweringInfo &FLI) const {
594 CallingConv::ID CC = F.getCallingConv();
595
596 // The infrastructure for normal calling convention lowering is essentially
597 // useless for kernels. We want to avoid any kind of legalization or argument
598 // splitting.
599 if (AMDGPU::isKernel(CC))
600 return lowerFormalArgumentsKernel(B, F, VRegs);
601
602 const bool IsGraphics = AMDGPU::isGraphics(CC);
603 const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);
604
605 MachineFunction &MF = B.getMF();
606 MachineBasicBlock &MBB = B.getMBB();
607 MachineRegisterInfo &MRI = MF.getRegInfo();
608 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
609 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
610 const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
611 const DataLayout &DL = F.getDataLayout();
612
613 SmallVector<CCValAssign, 16> ArgLocs;
614 CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());
615 const GCNUserSGPRUsageInfo &UserSGPRInfo = Info->getUserSGPRInfo();
616
617 if (UserSGPRInfo.hasImplicitBufferPtr()) {
618 Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
619 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
620 CCInfo.AllocateReg(ImplicitBufferPtrReg);
621 }
622
623 // FIXME: This probably isn't defined for mesa
624 if (UserSGPRInfo.hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
625 Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
626 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
627 CCInfo.AllocateReg(FlatScratchInitReg);
628 }
629
630 SmallVector<ArgInfo, 32> SplitArgs;
631 unsigned Idx = 0;
632 unsigned PSInputNum = 0;
633
634 // Insert the hidden sret parameter if the return value won't fit in the
635 // return registers.
636 if (!FLI.CanLowerReturn)
637 insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
638
639 for (auto &Arg : F.args()) {
640 if (DL.getTypeStoreSize(Arg.getType()) == 0)
641 continue;
642
643 if (Info->isWholeWaveFunction() && Idx == 0) {
644 assert(VRegs[Idx].size() == 1 && "Expected only one register");
645
646 // The first argument for whole wave functions is the original EXEC value.
647 B.buildInstr(AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_SETUP)
648 .addDef(VRegs[Idx][0]);
649
650 ++Idx;
651 continue;
652 }
653
654 const bool InReg = Arg.hasAttribute(Attribute::InReg);
655
656 if (Arg.hasAttribute(Attribute::SwiftSelf) ||
657 Arg.hasAttribute(Attribute::SwiftError) ||
658 Arg.hasAttribute(Attribute::Nest))
659 return false;
660
661 if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
662 const bool ArgUsed = !Arg.use_empty();
663 bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);
664
665 if (!SkipArg) {
666 Info->markPSInputAllocated(PSInputNum);
667 if (ArgUsed)
668 Info->markPSInputEnabled(PSInputNum);
669 }
670
671 ++PSInputNum;
672
673 if (SkipArg) {
674 for (Register R : VRegs[Idx])
675 B.buildUndef(R);
676
677 ++Idx;
678 continue;
679 }
680 }
681
682 ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
683 const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
684 setArgFlags(OrigArg, OrigArgIdx, DL, F);
685
686 splitToValueTypes(OrigArg, SplitArgs, DL, CC);
687 ++Idx;
688 }
689
690 // At least one interpolation mode must be enabled or else the GPU will
691 // hang.
692 //
693 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
694 // set PSInputAddr, the user wants to enable some bits after the compilation
695 // based on run-time states. Since we can't know what the final PSInputEna
696 // will look like, we shouldn't do anything here and the user should take
697 // responsibility for the correct programming.
698 //
699 // Otherwise, the following restrictions apply:
700 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
701 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
702 // enabled too.
703 if (CC == CallingConv::AMDGPU_PS) {
704 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
705 ((Info->getPSInputAddr() & 0xF) == 0 &&
706 Info->isPSInputAllocated(11))) {
707 CCInfo.AllocateReg(AMDGPU::VGPR0);
708 CCInfo.AllocateReg(AMDGPU::VGPR1);
709 Info->markPSInputAllocated(0);
710 Info->markPSInputEnabled(0);
711 }
712
713 if (Subtarget.isAmdPalOS()) {
714 // For isAmdPalOS, the user does not enable some bits after compilation
715 // based on run-time states; the register values being generated here are
716 // the final ones set in hardware. Therefore we need to apply the
717 // workaround to PSInputAddr and PSInputEnable together. (The case where
718 // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
719 // set up an input arg for a particular interpolation mode, but nothing
720 // uses that input arg. Really we should have an earlier pass that removes
721 // such an arg.)
722 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
723 if ((PsInputBits & 0x7F) == 0 ||
724 ((PsInputBits & 0xF) == 0 &&
725 (PsInputBits >> 11 & 1)))
726 Info->markPSInputEnabled(llvm::countr_zero(Info->getPSInputAddr()));
727 }
728 }
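// Illustrative aside (not part of the original source): the hardware rule the
// two PSInput checks above encode, restated as a small predicate. Per the
// comments above, bits 0-3 select the PERSP_* modes, bits 4-6 the LINEAR_*
// modes, and bit 11 is POS_W_FLOAT; the exact bit layout is an assumption
// taken from those comments.
#include <cassert>
#include <cstdint>

static bool psInputsNeedFixup(uint32_t InputBits) {
  const bool AnyInterp = (InputBits & 0x7F) != 0; // any PERSP_* or LINEAR_*
  const bool AnyPersp = (InputBits & 0xF) != 0;   // any PERSP_*
  const bool PosWFloat = (InputBits >> 11) & 1;   // POS_W_FLOAT
  // The GPU hangs unless some interpolation mode is enabled, and POS_W_FLOAT
  // additionally requires a PERSP_* mode.
  return !AnyInterp || (PosWFloat && !AnyPersp);
}

static void psInputExamples() {
  assert(psInputsNeedFixup(0x000));         // nothing enabled: force one on
  assert(psInputsNeedFixup(0x800 | 0x040)); // POS_W_FLOAT + LINEAR only
  assert(!psInputsNeedFixup(0x001));        // a PERSP mode enabled: fine
}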
729
731 CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());
732
733 if (!MBB.empty())
734 B.setInstr(*MBB.begin());
735
736 if (!IsEntryFunc && !IsGraphics) {
737 // For the fixed ABI, pass workitem IDs in the last argument register.
738 TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
739
740 if (!Subtarget.enableFlatScratch())
741 CCInfo.AllocateReg(Info->getScratchRSrcReg());
742 TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
743 }
744
745 IncomingValueAssigner Assigner(AssignFn);
746 if (!determineAssignments(Assigner, SplitArgs, CCInfo))
747 return false;
748
749 if (IsEntryFunc) {
750 // This assumes the registers are allocated by CCInfo in ascending order
751 // with no gaps.
752 Info->setNumWaveDispatchSGPRs(
753 CCInfo.getFirstUnallocated(AMDGPU::SGPR_32RegClass.getRegisters()));
754 Info->setNumWaveDispatchVGPRs(
755 CCInfo.getFirstUnallocated(AMDGPU::VGPR_32RegClass.getRegisters()));
756 }
757
758 FormalArgHandler Handler(B, MRI);
759 if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
760 return false;
761
762 uint64_t StackSize = Assigner.StackSize;
763
764 // Start adding system SGPRs.
765 if (IsEntryFunc)
766 TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
767
768 // When we tail call, we need to check if the callee's arguments will fit on
769 // the caller's stack. So, whenever we lower formal arguments, we should keep
770 // track of this information, since we might lower a tail call in this
771 // function later.
772 Info->setBytesInStackArgArea(StackSize);
773
774 // Move back to the end of the basic block.
775 B.setMBB(MBB);
776
777 return true;
778}
779
780bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
781 CCState &CCInfo,
782 SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
783 CallLoweringInfo &Info) const {
784 MachineFunction &MF = MIRBuilder.getMF();
785
786 // If there's no call site, this doesn't correspond to a call from the IR and
787 // doesn't need implicit inputs.
788 if (!Info.CB)
789 return true;
790
791 const AMDGPUFunctionArgInfo *CalleeArgInfo
792 = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
793
794 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
795 const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();
796
797
798 // TODO: Unify with private memory register handling. This is complicated by
799 // the fact that at least in kernels, the input argument is not necessarily
800 // in the same location as the input.
801 static constexpr AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
802 AMDGPUFunctionArgInfo::DISPATCH_PTR,
803 AMDGPUFunctionArgInfo::QUEUE_PTR,
804 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
805 AMDGPUFunctionArgInfo::DISPATCH_ID,
806 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
807 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
808 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
809 AMDGPUFunctionArgInfo::LDS_KERNEL_ID,
810 };
811
812 static constexpr StringLiteral ImplicitAttrNames[][2] = {
813 {"amdgpu-no-dispatch-ptr", ""},
814 {"amdgpu-no-queue-ptr", ""},
815 {"amdgpu-no-implicitarg-ptr", ""},
816 {"amdgpu-no-dispatch-id", ""},
817 {"amdgpu-no-workgroup-id-x", "amdgpu-no-cluster-id-x"},
818 {"amdgpu-no-workgroup-id-y", "amdgpu-no-cluster-id-y"},
819 {"amdgpu-no-workgroup-id-z", "amdgpu-no-cluster-id-z"},
820 {"amdgpu-no-lds-kernel-id", ""},
821 };
822
823 MachineRegisterInfo &MRI = MF.getRegInfo();
824
825 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
826 const AMDGPULegalizerInfo *LI
827 = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());
828
829 unsigned I = 0;
830 for (auto InputID : InputRegs) {
831 const ArgDescriptor *OutgoingArg;
832 const TargetRegisterClass *ArgRC;
833 LLT ArgTy;
834
835 // If the callee does not use the attribute value, skip copying the value.
836 if (all_of(ImplicitAttrNames[I++], [&](StringRef AttrName) {
837 return AttrName.empty() || Info.CB->hasFnAttr(AttrName);
838 }))
839 continue;
840
841 std::tie(OutgoingArg, ArgRC, ArgTy) =
842 CalleeArgInfo->getPreloadedValue(InputID);
843 if (!OutgoingArg)
844 continue;
845
846 const ArgDescriptor *IncomingArg;
847 const TargetRegisterClass *IncomingArgRC;
848 std::tie(IncomingArg, IncomingArgRC, ArgTy) =
849 CallerArgInfo.getPreloadedValue(InputID);
850 assert(IncomingArgRC == ArgRC);
851
852 Register InputReg = MRI.createGenericVirtualRegister(ArgTy);
853
854 if (IncomingArg) {
855 LI->buildLoadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
856 } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
857 LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
858 } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
859 std::optional<uint32_t> Id =
860 AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
861 if (Id) {
862 MIRBuilder.buildConstant(InputReg, *Id);
863 } else {
864 MIRBuilder.buildUndef(InputReg);
865 }
866 } else {
867 // We may have proven the input wasn't needed, although the ABI is
868 // requiring it. We just need to allocate the register appropriately.
869 MIRBuilder.buildUndef(InputReg);
870 }
871
872 if (OutgoingArg->isRegister()) {
873 ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
874 if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
875 report_fatal_error("failed to allocate implicit input argument");
876 } else {
877 LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
878 return false;
879 }
880 }
881
882 // Pack workitem IDs into a single register or pass it as is if already
883 // packed.
884 const ArgDescriptor *OutgoingArg;
885 const TargetRegisterClass *ArgRC;
886 LLT ArgTy;
887
888 std::tie(OutgoingArg, ArgRC, ArgTy) =
889 CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
890 if (!OutgoingArg)
891 std::tie(OutgoingArg, ArgRC, ArgTy) =
892 CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
893 if (!OutgoingArg)
894 std::tie(OutgoingArg, ArgRC, ArgTy) =
895 CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
896 if (!OutgoingArg)
897 return false;
898
899 auto WorkitemIDX =
900 CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
901 auto WorkitemIDY =
902 CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
903 auto WorkitemIDZ =
904 CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
905
906 const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
907 const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
908 const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
909 const LLT S32 = LLT::scalar(32);
910
911 const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
912 const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
913 const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");
914
915 // If incoming ids are not packed we need to pack them.
916 // FIXME: Should consider known workgroup size to eliminate known 0 cases.
917 Register InputReg;
918 if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
919 NeedWorkItemIDX) {
920 if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
921 InputReg = MRI.createGenericVirtualRegister(S32);
922 LI->buildLoadInputValue(InputReg, MIRBuilder, IncomingArgX,
923 std::get<1>(WorkitemIDX),
924 std::get<2>(WorkitemIDX));
925 } else {
926 InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
927 }
928 }
929
930 if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
931 NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
932 Register Y = MRI.createGenericVirtualRegister(S32);
933 LI->buildLoadInputValue(Y, MIRBuilder, IncomingArgY,
934 std::get<1>(WorkitemIDY), std::get<2>(WorkitemIDY));
935
936 Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
937 InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
938 }
939
940 if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
941 NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
942 Register Z = MRI.createGenericVirtualRegister(S32);
943 LI->buildLoadInputValue(Z, MIRBuilder, IncomingArgZ,
944 std::get<1>(WorkitemIDZ), std::get<2>(WorkitemIDZ));
945
946 Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
947 InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
948 }
949
950 if (!InputReg &&
951 (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
952 InputReg = MRI.createGenericVirtualRegister(S32);
953 if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
954 // We're in a situation where the outgoing function requires the workitem
955 // ID, but the calling function does not have it (e.g a graphics function
956 // calling a C calling convention function). This is illegal, but we need
957 // to produce something.
958 MIRBuilder.buildUndef(InputReg);
959 } else {
960 // Workitem ids are already packed, any of present incoming arguments will
961 // carry all required fields.
962 ArgDescriptor IncomingArg = ArgDescriptor::createArg(
963 IncomingArgX ? *IncomingArgX :
964 IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
965 LI->buildLoadInputValue(InputReg, MIRBuilder, &IncomingArg,
966 &AMDGPU::VGPR_32RegClass, S32);
967 }
968 }
969
970 if (OutgoingArg->isRegister()) {
971 if (InputReg)
972 ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
973
974 if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
975 report_fatal_error("failed to allocate implicit input argument");
976 } else {
977 LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
978 return false;
979 }
980
981 return true;
982}
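// Illustrative aside (not part of the original source): the packed workitem ID
// layout produced by the shifts above, assuming the 10-bit-per-component
// packing (X in bits 0-9, Y in bits 10-19, Z in bits 20-29) that the shift
// amounts of 10 and 20 imply.
#include <cassert>
#include <cstdint>

static uint32_t packWorkitemIds(uint32_t X, uint32_t Y, uint32_t Z) {
  return X | (Y << 10) | (Z << 20);
}

static void packWorkitemIdExamples() {
  assert(packWorkitemIds(5, 0, 0) == 5u);
  assert(packWorkitemIds(5, 3, 0) == (5u | (3u << 10)));
  // Components whose maximum workitem ID is known to be 0 are simply never
  // OR'd in, matching the getMaxWorkitemID() checks above.
  assert(packWorkitemIds(1023, 1023, 1023) == 0x3FFFFFFFu);
}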
983
984/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
985/// CC.
986static std::pair<CCAssignFn *, CCAssignFn *>
987getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
988 return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
989}
990
991static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
992 bool IsTailCall, bool IsWave32,
993 CallingConv::ID CC,
994 bool IsDynamicVGPRChainCall = false) {
995 // For calls to amdgpu_cs_chain functions, the address is known to be uniform.
996 assert((AMDGPU::isChainCC(CC) || !IsIndirect || !IsTailCall) &&
997 "Indirect calls can't be tail calls, "
998 "because the address can be divergent");
999 if (!IsTailCall)
1000 return AMDGPU::G_SI_CALL;
1001
1002 if (AMDGPU::isChainCC(CC)) {
1003 if (IsDynamicVGPRChainCall)
1004 return IsWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR
1005 : AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR;
1006 return IsWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32 : AMDGPU::SI_CS_CHAIN_TC_W64;
1007 }
1008
1009 if (CallerF.getFunction().getCallingConv() ==
1010 CallingConv::AMDGPU_Gfx_WholeWave)
1011 return AMDGPU::SI_TCRETURN_GFX_WholeWave;
1012
1013 if (CC == CallingConv::AMDGPU_Gfx || CC == CallingConv::AMDGPU_Gfx_WholeWave)
1014 return AMDGPU::SI_TCRETURN_GFX;
1015
1016 return AMDGPU::SI_TCRETURN;
1017}
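// Illustrative aside (not part of the original source): the opcode decision in
// getCallOpcode above, restated as a standalone classifier. The enum values
// are placeholders that stand in for the real pseudo-instruction opcodes; the
// wave32/wave64 split and the whole-wave-caller case are omitted for brevity.
#include <cassert>

enum class CallKind { Normal, TailChainDVGPR, TailChain, TailGfx, Tail };

static CallKind classifyCall(bool IsTailCall, bool IsChainCC,
                             bool IsDynamicVGPR, bool IsGfxCC) {
  if (!IsTailCall)
    return CallKind::Normal;                        // G_SI_CALL
  if (IsChainCC)
    return IsDynamicVGPR ? CallKind::TailChainDVGPR // SI_CS_CHAIN_TC_*_DVGPR
                         : CallKind::TailChain;     // SI_CS_CHAIN_TC_*
  return IsGfxCC ? CallKind::TailGfx                // SI_TCRETURN_GFX
                 : CallKind::Tail;                  // SI_TCRETURN
}

static void classifyCallExamples() {
  assert(classifyCall(false, false, false, false) == CallKind::Normal);
  assert(classifyCall(true, true, true, false) == CallKind::TailChainDVGPR);
  assert(classifyCall(true, false, false, true) == CallKind::TailGfx);
}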
1018
1019// Add operands to call instruction to track the callee.
1020static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
1021 MachineIRBuilder &MIRBuilder,
1022 AMDGPUCallLowering::CallLoweringInfo &Info,
1023 bool IsDynamicVGPRChainCall = false) {
1024 if (Info.Callee.isReg()) {
1025 CallInst.addReg(Info.Callee.getReg());
1026 CallInst.addImm(0);
1027 } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
1028 // The call lowering lightly assumed we can directly encode a call target in
1029 // the instruction, which is not the case. Materialize the address here.
1030 const GlobalValue *GV = Info.Callee.getGlobal();
1031 auto Ptr = MIRBuilder.buildGlobalValue(
1032 LLT::pointer(GV->getAddressSpace(), 64), GV);
1033 CallInst.addReg(Ptr.getReg(0));
1034
1035 if (IsDynamicVGPRChainCall) {
1036 // DynamicVGPR chain calls are always indirect.
1037 CallInst.addImm(0);
1038 } else
1039 CallInst.add(Info.Callee);
1040 } else
1041 return false;
1042
1043 return true;
1044}
1045
1046bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
1047 CallLoweringInfo &Info, MachineFunction &MF,
1048 SmallVectorImpl<ArgInfo> &InArgs) const {
1049 const Function &CallerF = MF.getFunction();
1050 CallingConv::ID CalleeCC = Info.CallConv;
1051 CallingConv::ID CallerCC = CallerF.getCallingConv();
1052
1053 // If the calling conventions match, then everything must be the same.
1054 if (CalleeCC == CallerCC)
1055 return true;
1056
1057 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1058
1059 // Make sure that the caller and callee preserve all of the same registers.
1060 const auto *TRI = ST.getRegisterInfo();
1061
1062 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1063 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1064 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1065 return false;
1066
1067 // Check if the caller and callee will handle arguments in the same way.
1068 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1069 CCAssignFn *CalleeAssignFnFixed;
1070 CCAssignFn *CalleeAssignFnVarArg;
1071 std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
1072 getAssignFnsForCC(CalleeCC, TLI);
1073
1074 CCAssignFn *CallerAssignFnFixed;
1075 CCAssignFn *CallerAssignFnVarArg;
1076 std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
1077 getAssignFnsForCC(CallerCC, TLI);
1078
1079 // FIXME: We are not accounting for potential differences in implicitly passed
1080 // inputs, but only the fixed ABI is supported now anyway.
1081 IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
1082 CalleeAssignFnVarArg);
1083 IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
1084 CallerAssignFnVarArg);
1085 return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
1086}
1087
1088bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
1089 CallLoweringInfo &Info, MachineFunction &MF,
1090 SmallVectorImpl<ArgInfo> &OutArgs) const {
1091 // If there are no outgoing arguments, then we are done.
1092 if (OutArgs.empty())
1093 return true;
1094
1095 const Function &CallerF = MF.getFunction();
1096 CallingConv::ID CalleeCC = Info.CallConv;
1097 CallingConv::ID CallerCC = CallerF.getCallingConv();
1098 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1099
1100 CCAssignFn *AssignFnFixed;
1101 CCAssignFn *AssignFnVarArg;
1102 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
1103
1104 // We have outgoing arguments. Make sure that we can tail call with them.
1105 SmallVector<CCValAssign, 16> OutLocs;
1106 CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
1107 OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
1108
1109 if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
1110 LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
1111 return false;
1112 }
1113
1114 // Make sure that they can fit on the caller's stack.
1115 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1116 if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
1117 LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
1118 return false;
1119 }
1120
1121 // Verify that the parameters in callee-saved registers match.
1122 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1123 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1124 const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
1125 MachineRegisterInfo &MRI = MF.getRegInfo();
1126 return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
1127}
1128
1129bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
1130 MachineIRBuilder &B, CallLoweringInfo &Info,
1131 SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
1132 // Must pass all target-independent checks in order to tail call optimize.
1133 if (!Info.IsTailCall)
1134 return false;
1135
1136 // Indirect calls can't be tail calls, because the address can be divergent.
1137 // TODO Check divergence info if the call really is divergent.
1138 if (Info.Callee.isReg())
1139 return false;
1140
1141 MachineFunction &MF = B.getMF();
1142 const Function &CallerF = MF.getFunction();
1143 CallingConv::ID CalleeCC = Info.CallConv;
1144 CallingConv::ID CallerCC = CallerF.getCallingConv();
1145
1146 const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
1147 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1148 // Kernels aren't callable, and don't have a live in return address so it
1149 // doesn't make sense to do a tail call with entry functions.
1150 if (!CallerPreserved)
1151 return false;
1152
1153 if (!AMDGPU::mayTailCallThisCC(CalleeCC)) {
1154 LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
1155 return false;
1156 }
1157
1158 if (any_of(CallerF.args(), [](const Argument &A) {
1159 return A.hasByValAttr() || A.hasSwiftErrorAttr();
1160 })) {
1161 LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
1162 "or swifterror arguments\n");
1163 return false;
1164 }
1165
1166 // If we have -tailcallopt, then we're done.
1167 if (MF.getTarget().Options.GuaranteedTailCallOpt) {
1168 return AMDGPU::canGuaranteeTCO(CalleeCC) &&
1169 CalleeCC == CallerF.getCallingConv();
1170 }
1171
1172 // Verify that the incoming and outgoing arguments from the callee are
1173 // safe to tail call.
1174 if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
1175 LLVM_DEBUG(
1176 dbgs()
1177 << "... Caller and callee have incompatible calling conventions.\n");
1178 return false;
1179 }
1180
1181 // FIXME: We need to check if any arguments passed in SGPR are uniform. If
1182 // they are not, this cannot be a tail call. If they are uniform, but may be
1183 // VGPR, we need to insert readfirstlanes.
1184 if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
1185 return false;
1186
1187 LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
1188 return true;
1189}
1190
1191// Insert outgoing implicit arguments for a call, by inserting copies to the
1192// implicit argument registers and adding the necessary implicit uses to the
1193// call instruction.
1194void AMDGPUCallLowering::handleImplicitCallArguments(
1195 MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
1196 const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
1197 CallingConv::ID CalleeCC,
1198 ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
1199 if (!ST.enableFlatScratch()) {
1200 // Insert copies for the SRD. In the HSA case, this should be an identity
1201 // copy.
1202 auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
1203 FuncInfo.getScratchRSrcReg());
1204
1205 auto CalleeRSrcReg = AMDGPU::isChainCC(CalleeCC)
1206 ? AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51
1207 : AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
1208
1209 MIRBuilder.buildCopy(CalleeRSrcReg, ScratchRSrcReg);
1210 CallInst.addReg(CalleeRSrcReg, RegState::Implicit);
1211 }
1212
1213 for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
1214 MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
1215 CallInst.addReg(ArgReg.first, RegState::Implicit);
1216 }
1217}
1218
1219namespace {
1220// Chain calls have special arguments that we need to handle. These have the
1221// same index as they do in the llvm.amdgcn.cs.chain intrinsic.
1222enum ChainCallArgIdx {
1223 Exec = 1,
1224 Flags = 4,
1225 NumVGPRs = 5,
1226 FallbackExec = 6,
1227 FallbackCallee = 7,
1228};
1229} // anonymous namespace
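// Illustrative aside (not part of the original source): how lowerTailCall
// below interprets the llvm.amdgcn.cs.chain operands indexed by
// ChainCallArgIdx, assuming only what that code checks: a flags value of 1
// selects dynamic-VGPR mode and requires the three extra operands (NumVGPRs,
// FallbackExec, FallbackCallee) for 8 arguments total, versus 5 when the
// flags value is 0.
#include <cassert>
#include <cstdint>

static bool chainArgCountIsValid(uint64_t Flags, unsigned NumArgs) {
  const bool IsDynamicVGPR = (Flags == 1);
  return NumArgs == (IsDynamicVGPR ? 8u : 5u);
}

static void chainArgExamples() {
  assert(chainArgCountIsValid(/*Flags=*/0, /*NumArgs=*/5));
  assert(chainArgCountIsValid(/*Flags=*/1, /*NumArgs=*/8));
  assert(!chainArgCountIsValid(/*Flags=*/0, /*NumArgs=*/8));
}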
1230
1231bool AMDGPUCallLowering::lowerTailCall(
1232 MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
1233 SmallVectorImpl<ArgInfo> &OutArgs) const {
1234 MachineFunction &MF = MIRBuilder.getMF();
1235 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1236 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1237 const Function &F = MF.getFunction();
1238 MachineRegisterInfo &MRI = MF.getRegInfo();
1239 const SIInstrInfo *TII = ST.getInstrInfo();
1240 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1241 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1242
1243 // True when we're tail calling, but without -tailcallopt.
1244 bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;
1245
1246 // Find out which ABI gets to decide where things go.
1247 CallingConv::ID CalleeCC = Info.CallConv;
1248 CCAssignFn *AssignFnFixed;
1249 CCAssignFn *AssignFnVarArg;
1250 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
1251
1252 MachineInstrBuilder CallSeqStart;
1253 if (!IsSibCall)
1254 CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);
1255
1256 bool IsChainCall = AMDGPU::isChainCC(Info.CallConv);
1257 bool IsDynamicVGPRChainCall = false;
1258
1259 if (IsChainCall) {
1260 ArgInfo FlagsArg = Info.OrigArgs[ChainCallArgIdx::Flags];
1261 const APInt &FlagsValue = cast<ConstantInt>(FlagsArg.OrigValue)->getValue();
1262 if (FlagsValue.isZero()) {
1263 if (Info.OrigArgs.size() != 5) {
1264 LLVM_DEBUG(dbgs() << "No additional args allowed if flags == 0\n");
1265 return false;
1266 }
1267 } else if (FlagsValue.isOneBitSet(0)) {
1268 IsDynamicVGPRChainCall = true;
1269
1270 if (Info.OrigArgs.size() != 8) {
1271 LLVM_DEBUG(dbgs() << "Expected 3 additional args\n");
1272 return false;
1273 }
1274
1275 // On GFX12, we can only change the VGPR allocation for wave32.
1276 if (!ST.isWave32()) {
1277 F.getContext().diagnose(DiagnosticInfoUnsupported(
1278 F, "dynamic VGPR mode is only supported for wave32"));
1279 return false;
1280 }
1281
1282 ArgInfo FallbackExecArg = Info.OrigArgs[ChainCallArgIdx::FallbackExec];
1283 assert(FallbackExecArg.Regs.size() == 1 &&
1284 "Expected single register for fallback EXEC");
1285 if (!FallbackExecArg.Ty->isIntegerTy(ST.getWavefrontSize())) {
1286 LLVM_DEBUG(dbgs() << "Bad type for fallback EXEC\n");
1287 return false;
1288 }
1289 }
1290 }
1291
1292 unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), /*IsTailCall*/ true,
1293 ST.isWave32(), CalleeCC, IsDynamicVGPRChainCall);
1294 auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1295
1296 if (FuncInfo->isWholeWaveFunction())
1297 addOriginalExecToReturn(MF, MIB);
1298
1299 // Keep track of the index of the next operand to be added to the call
1300 unsigned CalleeIdx = MIB->getNumOperands();
1301
1302 if (!addCallTargetOperands(MIB, MIRBuilder, Info, IsDynamicVGPRChainCall))
1303 return false;
1304
1305 // Byte offset for the tail call. When we are sibcalling, this will always
1306 // be 0.
1307 MIB.addImm(0);
1308
1309 // If this is a chain call, we need to pass in the EXEC mask as well as any
1310 // other special args.
1311 if (IsChainCall) {
1312 auto AddRegOrImm = [&](const ArgInfo &Arg) {
1313 if (auto CI = dyn_cast<ConstantInt>(Arg.OrigValue)) {
1314 MIB.addImm(CI->getSExtValue());
1315 } else {
1316 MIB.addReg(Arg.Regs[0]);
1317 unsigned Idx = MIB->getNumOperands() - 1;
1318 MIB->getOperand(Idx).setReg(constrainOperandRegClass(
1319 MF, *TRI, MRI, *TII, *ST.getRegBankInfo(), *MIB, MIB->getDesc(),
1320 MIB->getOperand(Idx), Idx));
1321 }
1322 };
1323
1324 ArgInfo ExecArg = Info.OrigArgs[ChainCallArgIdx::Exec];
1325 assert(ExecArg.Regs.size() == 1 && "Too many regs for EXEC");
1326
1327 if (!ExecArg.Ty->isIntegerTy(ST.getWavefrontSize())) {
1328 LLVM_DEBUG(dbgs() << "Bad type for EXEC");
1329 return false;
1330 }
1331
1332 AddRegOrImm(ExecArg);
1333 if (IsDynamicVGPRChainCall)
1334 std::for_each(Info.OrigArgs.begin() + ChainCallArgIdx::NumVGPRs,
1335 Info.OrigArgs.end(), AddRegOrImm);
1336 }
1337
1338 // Tell the call which registers are clobbered.
1339 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
1340 MIB.addRegMask(Mask);
1341
1342 // FPDiff is the byte offset of the call's argument area from the callee's.
1343 // Stores to callee stack arguments will be placed in FixedStackSlots offset
1344 // by this amount for a tail call. In a sibling call it must be 0 because the
1345 // caller will deallocate the entire stack and the callee still expects its
1346 // arguments to begin at SP+0.
1347 int FPDiff = 0;
1348
1349 // This will be 0 for sibcalls, potentially nonzero for tail calls produced
1350 // by -tailcallopt. For sibcalls, the memory operands for the call are
1351 // already available in the caller's incoming argument space.
1352 unsigned NumBytes = 0;
1353 if (!IsSibCall) {
1354 // We aren't sibcalling, so we need to compute FPDiff. We need to do this
1355 // before handling assignments, because FPDiff must be known for memory
1356 // arguments.
1357 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1358 SmallVector<CCValAssign, 16> OutLocs;
1359 CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
1360
1361 // FIXME: Not accounting for callee implicit inputs
1362 OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
1363 if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
1364 return false;
1365
1366 // The callee will pop the argument stack as a tail call. Thus, we must
1367 // keep it 16-byte aligned.
1368 NumBytes = alignTo(OutInfo.getStackSize(), ST.getStackAlignment());
1369
1370 // FPDiff will be negative if this tail call requires more space than we
1371 // would automatically have in our incoming argument space. Positive if we
1372 // actually shrink the stack.
1373 FPDiff = NumReusableBytes - NumBytes;
1374
1375 // The stack pointer must be 16-byte aligned at all times it's used for a
1376 // memory operation, which in practice means at *all* times and in
1377 // particular across call boundaries. Therefore our own arguments started at
1378 // a 16-byte aligned SP and the delta applied for the tail call should
1379 // satisfy the same constraint.
1380 assert(isAligned(ST.getStackAlignment(), FPDiff) &&
1381 "unaligned stack on tail call");
1382 }
1383
1384 SmallVector<CCValAssign, 16> ArgLocs;
1385 CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
1386
1387 // We could pass MIB and directly add the implicit uses to the call
1388 // now. However, as an aesthetic choice, place implicit argument operands
1389 // after the ordinary user argument registers.
1390 SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
1391
1392 if (Info.CallConv != CallingConv::AMDGPU_Gfx &&
1393 Info.CallConv != CallingConv::AMDGPU_Gfx_WholeWave &&
1394 !AMDGPU::isChainCC(Info.CallConv)) {
1395 // With a fixed ABI, allocate fixed registers before user arguments.
1396 if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
1397 return false;
1398 }
1399
1400 OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
1401
1402 if (!determineAssignments(Assigner, OutArgs, CCInfo))
1403 return false;
1404
1405 // Do the actual argument marshalling.
1406 AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
1407 if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
1408 return false;
1409
1410 if (Info.ConvergenceCtrlToken) {
1411 MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
1412 }
1413 handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, CalleeCC,
1414 ImplicitArgRegs);
1415
1416 // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1417 // sequence start and end here.
1418 if (!IsSibCall) {
1419 MIB->getOperand(CalleeIdx + 1).setImm(FPDiff);
1420 CallSeqStart.addImm(NumBytes).addImm(0);
1421 // End the call sequence *before* emitting the call. Normally, we would
1422 // tidy the frame up after the call. However, here, we've laid out the
1423 // parameters so that when SP is reset, they will be in the correct
1424 // location.
1425 MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
1426 }
1427
1428 // Now we can add the actual call instruction to the correct basic block.
1429 MIRBuilder.insertInstr(MIB);
1430
1431 // If this is a whole wave tail call, we need to constrain the register for
1432 // the original EXEC.
1433 if (MIB->getOpcode() == AMDGPU::SI_TCRETURN_GFX_WholeWave) {
1434 MIB->getOperand(0).setReg(
1435 constrainOperandRegClass(MF, *TRI, MRI, *TII, *ST.getRegBankInfo(),
1436 *MIB, MIB->getDesc(), MIB->getOperand(0), 0));
1437 }
1438
1439 // If Callee is a reg, since it is used by a target specific
1440 // instruction, it must have a register class matching the
1441 // constraint of that instruction.
1442
1443 // FIXME: We should define regbankselectable call instructions to handle
1444 // divergent call targets.
1445 if (MIB->getOperand(CalleeIdx).isReg()) {
1446 MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
1447 MF, *TRI, MRI, *TII, *ST.getRegBankInfo(), *MIB, MIB->getDesc(),
1448 MIB->getOperand(CalleeIdx), CalleeIdx));
1449 }
1450
1451 MF.getFrameInfo().setHasTailCall();
1452 Info.LoweredTailCall = true;
1453 return true;
1454}
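// Illustrative aside (not part of the original source): the FPDiff arithmetic
// in lowerTailCall above, assuming a 16-byte stack alignment. FPDiff is the
// byte offset of the callee's argument area relative to the caller's; it is
// negative when the callee needs more incoming-argument stack than the caller
// already reserved, and stays 16-byte aligned because both operands are.
#include <cassert>

static int computeFPDiff(unsigned CallerArgAreaBytes, unsigned CalleeStackBytes,
                         unsigned StackAlign = 16) {
  const unsigned NumBytes =
      (CalleeStackBytes + StackAlign - 1) / StackAlign * StackAlign;
  return static_cast<int>(CallerArgAreaBytes) - static_cast<int>(NumBytes);
}

static void fpDiffExamples() {
  assert(computeFPDiff(32, 16) == 16);  // callee needs less: stack shrinks
  assert(computeFPDiff(16, 40) == -32); // callee needs more: negative FPDiff
  assert(computeFPDiff(0, 0) == 0);     // nothing passed on the stack
}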
1455
1456/// Lower a call to the @llvm.amdgcn.cs.chain intrinsic.
1457bool AMDGPUCallLowering::lowerChainCall(MachineIRBuilder &MIRBuilder,
1458 CallLoweringInfo &Info) const {
1459 ArgInfo Callee = Info.OrigArgs[0];
1460 ArgInfo SGPRArgs = Info.OrigArgs[2];
1461 ArgInfo VGPRArgs = Info.OrigArgs[3];
1462
1463 MachineFunction &MF = MIRBuilder.getMF();
1464 const Function &F = MF.getFunction();
1465 const DataLayout &DL = F.getDataLayout();
1466
1467 // The function to jump to is actually the first argument, so we'll change the
1468 // Callee and other info to match that before using our existing helper.
1469 const Value *CalleeV = Callee.OrigValue->stripPointerCasts();
1470 if (const Function *F = dyn_cast<Function>(CalleeV)) {
1471 Info.Callee = MachineOperand::CreateGA(F, 0);
1472 Info.CallConv = F->getCallingConv();
1473 } else {
1474 assert(Callee.Regs.size() == 1 && "Too many regs for the callee");
1475 Info.Callee = MachineOperand::CreateReg(Callee.Regs[0], false);
1476 Info.CallConv = CallingConv::AMDGPU_CS_Chain; // amdgpu_cs_chain_preserve
1477 // behaves the same here.
1478 }
1479
1480 // The function that we're calling cannot be vararg (only the intrinsic is).
1481 Info.IsVarArg = false;
1482
1483 assert(
1484 all_of(SGPRArgs.Flags, [](ISD::ArgFlagsTy F) { return F.isInReg(); }) &&
1485 "SGPR arguments should be marked inreg");
1486 assert(
1487 none_of(VGPRArgs.Flags, [](ISD::ArgFlagsTy F) { return F.isInReg(); }) &&
1488 "VGPR arguments should not be marked inreg");
1489
1490 SmallVector<ArgInfo, 8> OutArgs;
1491 splitToValueTypes(SGPRArgs, OutArgs, DL, Info.CallConv);
1492 splitToValueTypes(VGPRArgs, OutArgs, DL, Info.CallConv);
1493
1494 Info.IsMustTailCall = true;
1495 return lowerTailCall(MIRBuilder, Info, OutArgs);
1496}
1497
1498bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
1499 CallLoweringInfo &Info) const {
1500 if (Function *F = Info.CB->getCalledFunction())
1501 if (F->isIntrinsic()) {
1502 switch (F->getIntrinsicID()) {
1503 case Intrinsic::amdgcn_cs_chain:
1504 return lowerChainCall(MIRBuilder, Info);
1505 case Intrinsic::amdgcn_call_whole_wave:
1506 Info.CallConv = CallingConv::AMDGPU_Gfx_WholeWave;
1507
1508 // Get the callee from the original instruction, so it doesn't look like
1509 // this is an indirect call.
1510 Info.Callee = MachineOperand::CreateGA(
1511 cast<GlobalValue>(Info.CB->getOperand(0)), /*Offset=*/0);
1512 Info.OrigArgs.erase(Info.OrigArgs.begin());
1513 Info.IsVarArg = false;
1514 break;
1515 default:
1516 llvm_unreachable("Unexpected intrinsic call");
1517 }
1518 }
1519
1520 if (Info.IsVarArg) {
1521 LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
1522 return false;
1523 }
1524
1525 MachineFunction &MF = MIRBuilder.getMF();
1526 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1527 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1528
1529 const Function &F = MF.getFunction();
1530 MachineRegisterInfo &MRI = MF.getRegInfo();
1531 const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1532 const DataLayout &DL = F.getDataLayout();
1533
1534 SmallVector<ArgInfo, 8> OutArgs;
1535 for (auto &OrigArg : Info.OrigArgs)
1536 splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
1537
1538 SmallVector<ArgInfo, 8> InArgs;
1539 if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
1540 splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
1541
1542 // If we can lower as a tail call, do that instead.
1543 bool CanTailCallOpt =
1544 isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
1545
1546 // We must emit a tail call if we have musttail.
1547 if (Info.IsMustTailCall && !CanTailCallOpt) {
1548 LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
1549 return false;
1550 }
1551
1552 Info.IsTailCall = CanTailCallOpt;
1553 if (CanTailCallOpt)
1554 return lowerTailCall(MIRBuilder, Info, OutArgs);
1555
1556 // Find out which ABI gets to decide where things go.
1557 CCAssignFn *AssignFnFixed;
1558 CCAssignFn *AssignFnVarArg;
1559 std::tie(AssignFnFixed, AssignFnVarArg) =
1560 getAssignFnsForCC(Info.CallConv, TLI);
1561
1562 MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
1563 .addImm(0)
1564 .addImm(0);
1565
1566 // Create a temporarily-floating call instruction so we can add the implicit
1567 // uses of arg registers.
1568 unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false, ST.isWave32(),
1569 Info.CallConv);
1570
1571 auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1572 MIB.addDef(TRI->getReturnAddressReg(MF));
1573
1574 if (!Info.IsConvergent)
1575 MIB.setMIFlag(MachineInstr::NoConvergent);
1576
1577 if (!addCallTargetOperands(MIB, MIRBuilder, Info))
1578 return false;
1579
1580 // Tell the call which registers are clobbered.
1581 const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
1582 MIB.addRegMask(Mask);
1583
1584 SmallVector<CCValAssign, 16> ArgLocs;
1585 CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
1586
1587 // We could pass MIB and directly add the implicit uses to the call
1588 // now. However, as an aesthetic choice, place implicit argument operands
1589 // after the ordinary user argument registers.
1590 SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
1591
1592 if (Info.CallConv != CallingConv::AMDGPU_Gfx &&
1593 Info.CallConv != CallingConv::AMDGPU_Gfx_WholeWave) {
1594 // With a fixed ABI, allocate fixed registers before user arguments.
1595 if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
1596 return false;
1597 }
1598
1599 // Do the actual argument marshalling.
1600 OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
1601 if (!determineAssignments(Assigner, OutArgs, CCInfo))
1602 return false;
1603
1604 AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
1605 if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
1606 return false;
1607
1608 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1609
1610 if (Info.ConvergenceCtrlToken) {
1611 MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
1612 }
1613 handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, Info.CallConv,
1614 ImplicitArgRegs);
1615
1616 // Get a count of how many bytes are to be pushed on the stack.
1617 unsigned NumBytes = CCInfo.getStackSize();
1618
1619 // If Callee is a reg, since it is used by a target specific
1620 // instruction, it must have a register class matching the
1621 // constraint of that instruction.
1622
1623 // FIXME: We should define regbankselectable call instructions to handle
1624 // divergent call targets.
1625 if (MIB->getOperand(1).isReg()) {
1626 MIB->getOperand(1).setReg(constrainOperandRegClass(
1627 MF, *TRI, MRI, *ST.getInstrInfo(),
1628 *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
1629 1));
1630 }
1631
1632 // Now we can add the actual call instruction to the correct position.
1633 MIRBuilder.insertInstr(MIB);
1634
1635 // Finally we can copy the returned value back into its virtual-register. In
1636 // symmetry with the arguments, the physical register must be an
1637 // implicit-define of the call instruction.
1638 if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
1639 CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
1640 Info.IsVarArg);
1641 IncomingValueAssigner Assigner(RetAssignFn);
1642 CallReturnHandler Handler(MIRBuilder, MRI, MIB);
1643 if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
1644 Info.CallConv, Info.IsVarArg))
1645 return false;
1646 }
1647
1648 uint64_t CalleePopBytes = NumBytes;
1649
1650 MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
1651 .addImm(0)
1652 .addImm(CalleePopBytes);
1653
1654 if (!Info.CanLowerReturn) {
1655 insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
1656 Info.DemoteRegister, Info.DemoteStackIndex);
1657 }
1658
1659 return true;
1660}
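// [Editorial sketch, not part of AMDGPUCallLowering.cpp] For a simple
// non-tail call, lowerCall() above builds a bracketed sequence along these
// lines; exact opcodes, physical registers and implicit operands depend on
// the subtarget and calling convention:
//
//   ADJCALLSTACKUP 0, 0
//   $vgpr0 = COPY %x                    ; outgoing argument copies / stores
//   $sgpr30_sgpr31 = G_SI_CALL @callee, <regmask>, implicit $vgpr0, ...
//   %r = COPY $vgpr0                    ; returned value, if any
//   ADJCALLSTACKDOWN 0, <NumBytes>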
1661
1662void AMDGPUCallLowering::addOriginalExecToReturn(
1663 MachineFunction &MF, MachineInstrBuilder &Ret) const {
1664 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1665 const SIInstrInfo *TII = ST.getInstrInfo();
1666 const MachineInstr *Setup = TII->getWholeWaveFunctionSetup(MF);
1667 Ret.addReg(Setup->getOperand(0).getReg());
1668}
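A minimal sketch of how these hooks are reached, assuming the standard GlobalISel wiring and simplifying IRTranslator's bookkeeping; the helper name below is hypothetical, while the LLVM headers, types, and calls are real. The IRTranslator asks the subtarget for its CallLowering instance, which on AMDGPU is the AMDGPUCallLowering implemented in this file, and forwards each IR call site to lowerCall().

// Editorial sketch only; not part of AMDGPUCallLowering.cpp. The helper name
// is hypothetical; the wiring is simplified.
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"

using namespace llvm;

// Forward an already-populated CallLoweringInfo to the subtarget's
// CallLowering implementation (AMDGPUCallLowering on AMDGPU targets).
static bool forwardCallToTarget(MachineIRBuilder &MIRBuilder,
                                CallLowering::CallLoweringInfo &Info) {
  const MachineFunction &MF = MIRBuilder.getMF();
  const CallLowering *CLI = MF.getSubtarget().getCallLowering();
  // lowerCall() emits the ADJCALLSTACKUP/ADJCALLSTACKDOWN bracket, the call
  // pseudo with its implicit register operands, and the argument and
  // return-value copies shown in the listing above.
  return CLI->lowerCall(MIRBuilder, Info);
}

In the actual pipeline, IRTranslator::translateCallBase() populates Info (callee, calling convention, and the split argument and return ArgInfos) before invoking the hook.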