//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/TargetParser/TargetParser.h"
#include <optional>

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

37 "amdhsa-code-object-version", llvm::cl::Hidden,
39 llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
41
namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
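
// Worked example (illustrative): with Shift = 4 and Width = 3, getBitMask
// returns 0b111'0000 (0x70); packBits(0b101, 0, 4, 3) yields 0x50, and
// unpackBits(0x50, 4, 3) recovers 0b101. Bits of Dst outside the mask are
// preserved by packBits.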

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 10 : 0;
}

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 6 : 4;
}

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 0 : 4;
}

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 4 : 8;
}

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}
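
// Illustration: on gfx9/gfx10 the vmcnt field is split into 4 low bits at
// shift 0 plus 2 high bits at shift 14, so the maximum vmcnt is
// (1 << (4 + 2)) - 1 = 63. On gfx11 it is a contiguous 6-bit field at
// shift 10.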

/// \returns Loadcnt bit width.
unsigned getLoadcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 6 : 0;
}

/// \returns Samplecnt bit width.
unsigned getSamplecntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 6 : 0;
}

/// \returns Bvhcnt bit width.
unsigned getBvhcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 3 : 0;
}

/// \returns Dscnt bit width.
unsigned getDscntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 6 : 0;
}

/// \returns Dscnt bit shift in combined S_WAIT instructions.
unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }

/// \returns Storecnt or Vscnt bit width, depending on VersionMajor.
unsigned getStorecntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 0;
}

/// \returns Kmcnt bit width.
unsigned getKmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 5 : 0;
}

/// \returns Xcnt bit width.
unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
  return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
}

/// \returns shift for Loadcnt/Storecnt in combined S_WAIT instructions.
unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 12 ? 8 : 0;
}
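
// Layout sketch implied by the widths and shifts above (illustrative): in
// the gfx12 combined S_WAIT encodings, s_wait_loadcnt_dscnt packs dscnt in
// bits [5:0] and loadcnt in bits [13:8], while s_wait_storecnt_dscnt packs
// dscnt in bits [5:0] and storecnt in bits [13:8].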

/// \returns VaSdst bit width
inline unsigned getVaSdstBitWidth() { return 3; }

/// \returns VaSdst bit shift
inline unsigned getVaSdstBitShift() { return 9; }

/// \returns VmVsrc bit width
inline unsigned getVmVsrcBitWidth() { return 3; }

/// \returns VmVsrc bit shift
inline unsigned getVmVsrcBitShift() { return 2; }

/// \returns VaVdst bit width
inline unsigned getVaVdstBitWidth() { return 4; }

/// \returns VaVdst bit shift
inline unsigned getVaVdstBitShift() { return 12; }

/// \returns VaVcc bit width
inline unsigned getVaVccBitWidth() { return 1; }

/// \returns VaVcc bit shift
inline unsigned getVaVccBitShift() { return 1; }

/// \returns SaSdst bit width
inline unsigned getSaSdstBitWidth() { return 1; }

/// \returns SaSdst bit shift
inline unsigned getSaSdstBitShift() { return 0; }

/// \returns VaSsrc bit width
inline unsigned getVaSsrcBitWidth() { return 1; }

/// \returns VaSsrc bit shift
inline unsigned getVaSsrcBitShift() { return 8; }
/// \returns HoldCnt bit width
inline unsigned getHoldCntWidth() { return 1; }

/// \returns HoldCnt bit shift
inline unsigned getHoldCntBitShift() { return 7; }

} // end anonymous namespace

namespace llvm {

namespace AMDGPU {

/// \returns true if the target supports signed immediate offset for SMRD
/// instructions.
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
  return isGFX9Plus(ST);
}

/// \returns True if \p STI is AMDHSA.
bool isHsaAbi(const MCSubtargetInfo &STI) {
  return STI.getTargetTriple().getOS() == Triple::AMDHSA;
}

unsigned getAMDHSACodeObjectVersion(const Module &M) {
  if (auto *Ver = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("amdhsa_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;
  }

  return getDefaultAMDHSACodeObjectVersion();
}

unsigned getDefaultAMDHSACodeObjectVersion() {
  return DefaultAMDHSACodeObjectVersion;
}

unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion) {
  switch (ABIVersion) {
  case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
    return 4;
  case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
    return 5;
  case ELF::ELFABIVERSION_AMDGPU_HSA_V6:
    return 6;
  default:
    return getDefaultAMDHSACodeObjectVersion();
  }
}

uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion) {
  if (T.getOS() != Triple::AMDHSA)
    return 0;

  switch (CodeObjectVersion) {
  case 4:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  case 6:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
  default:
    report_fatal_error("Unsupported AMDHSA Code Object Version " +
                       Twine(CodeObjectVersion));
  }
}

unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 48;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
  }
}

// FIXME: All such magic numbers about the ABI should be in a
// central TD file.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 24;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
  }
}

unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 32;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
  }
}

unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV4:
    return 40;
  case AMDHSA_COV5:
  case AMDHSA_COV6:
  default:
    return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
  }
}

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info =
      getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
                           const MIMGDimInfo *Dim, bool IsA16,
                           bool IsG16Supported) {
  unsigned AddrWords = BaseOpcode->NumExtraArgs;
  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                            (BaseOpcode->LodOrClampOrMip ? 1 : 0);
  if (IsA16)
    AddrWords += divideCeil(AddrComponents, 2);
  else
    AddrWords += AddrComponents;

  // Note: For subtargets that support A16 but not G16, enabling A16 also
  // enables 16 bit gradients.
  // For subtargets that support A16 (operand) and G16 (done with a different
  // instruction encoding), they are independent.

  if (BaseOpcode->Gradients) {
    if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate, we pack them separately.
      // For the 3d case,
      // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
      AddrWords += alignTo<2>(Dim->NumGradients / 2);
    else
      AddrWords += Dim->NumGradients;
  }
  return AddrWords;
}
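
// Worked example (illustrative): a 2D sample with gradients and an LOD clamp
// has AddrComponents = 2 coords + 1 clamp = 3, plus 4 gradient values.
// Without A16/G16 that is 3 + 4 = 7 address dwords; with IsA16 and packed
// 16-bit gradients it is divideCeil(3, 2) + alignTo<2>(4 / 2) = 2 + 2 = 4
// address dwords (plus any NumExtraArgs).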

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
  bool IsBufferInv;
  bool tfe;
};

struct MTBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

struct SMInfo {
  uint16_t Opcode;
  bool IsBuffer;
};

struct VOPInfo {
  uint16_t Opcode;
  bool IsSingle;
};

struct VOPC64DPPInfo {
  uint16_t Opcode;
};

struct VOPCDPPAsmOnlyInfo {
  uint16_t Opcode;
};

struct VOP3CDPPAsmOnlyInfo {
  uint16_t Opcode;
};

struct VOPDComponentInfo {
  uint16_t BaseVOP;
  uint16_t VOPDOp;
  bool CanBeVOPDX;
  bool CanBeVOPD3X;
};

struct VOPDInfo {
  uint16_t Opcode;
  uint16_t OpX;
  uint16_t OpY;
  uint16_t Subtarget;
  bool VOPD3;
};

struct VOPTrue16Info {
  uint16_t Opcode;
  bool IsTrue16;
};

#define GET_FP4FP8DstByteSelTable_DECL
#define GET_FP4FP8DstByteSelTable_IMPL

struct FP4FP8DstByteSelInfo {
  uint16_t Opcode;
  bool HasFP8DstByteSel;
  bool HasFP4DstByteSel;
};

struct WMMAOpcodeMappingInfo {
  unsigned Opcode2Addr;
  unsigned Opcode3Addr;
};

#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPCAsmOnlyInfoTable_DECL
#define GET_VOPCAsmOnlyInfoTable_IMPL
#define GET_VOP3CAsmOnlyInfoTable_DECL
#define GET_VOP3CAsmOnlyInfoTable_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_True16D16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#define GET_getMFMA_F8F6F4_WithSize_DECL
#define GET_getMFMA_F8F6F4_WithSize_IMPL
#define GET_isMFMA_F8F6F4Table_IMPL
#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL

#include "AMDGPUGenSearchableTables.inc"

int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info =
      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMTBUFElements(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info && Info->has_vaddr;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info && Info->has_srsrc;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info && Info->has_soffset;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info =
      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMUBUFElements(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->has_vaddr;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->has_srsrc;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->has_soffset;
}

bool getMUBUFIsBufferInv(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->IsBufferInv;
}

bool getMUBUFTfe(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info && Info->tfe;
}

bool getSMEMIsBuffer(unsigned Opc) {
  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info && Info->IsBuffer;
}

bool getVOP1IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return !Info || Info->IsSingle;
}

bool getVOP2IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return !Info || Info->IsSingle;
}

bool getVOP3IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return !Info || Info->IsSingle;
}

bool isVOPC64DPP(unsigned Opc) {
  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
}

bool isVOPCAsmOnly(unsigned Opc) { return isVOPCAsmOnlyOpcodeHelper(Opc); }

bool getMAIIsDGEMM(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info && Info->is_dgemm;
}

bool getMAIIsGFX940XDL(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info && Info->is_gfx940_xdl;
}

bool getWMMAIsXDL(unsigned Opc) {
  const WMMAInstInfo *Info = getWMMAInstInfoHelper(Opc);
  return Info ? Info->is_wmma_xdl : false;
}

uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal) {
  switch (EncodingVal) {
  case MFMAScaleFormats::FP6_E2M3:
  case MFMAScaleFormats::FP6_E3M2:
    return 6;
  case MFMAScaleFormats::FP4_E2M1:
    return 4;
  case MFMAScaleFormats::FP8_E4M3:
  case MFMAScaleFormats::FP8_E5M2:
  default:
    return 8;
  }

  llvm_unreachable("covered switch over mfma scale formats");
}

const MFMA_F8F6F4_Info *getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ,
                                                      unsigned BLGP,
                                                      unsigned F8F8Opcode) {
  uint8_t SrcANumRegs = mfmaScaleF8F6F4FormatToNumRegs(CBSZ);
  uint8_t SrcBNumRegs = mfmaScaleF8F6F4FormatToNumRegs(BLGP);
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
}

uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt) {
  switch (Fmt) {
  case WMMA::MATRIX_FMT_FP8:
  case WMMA::MATRIX_FMT_BF8:
    return 16;
  case WMMA::MATRIX_FMT_FP6:
  case WMMA::MATRIX_FMT_BF6:
    return 12;
  case WMMA::MATRIX_FMT_FP4:
    return 8;
  }

  llvm_unreachable("covered switch over wmma scale formats");
}

const MFMA_F8F6F4_Info *getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA,
                                                      unsigned FmtB,
                                                      unsigned F8F8Opcode) {
  uint8_t SrcANumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtA);
  uint8_t SrcBNumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtB);
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
}

unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST) {
  if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
    return SIEncodingFamily::GFX1250;
  if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
    return SIEncodingFamily::GFX12;
  if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))
    return SIEncodingFamily::GFX11;
  llvm_unreachable("Subtarget generation does not support VOPD!");
}
634
635CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3) {
636 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
637 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
638 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
639 if (Info) {
640 // Check that Opc can be used as VOPDY for this encoding. V_MOV_B32 as a
641 // VOPDX is just a placeholder here, it is supported on all encodings.
642 // TODO: This can be optimized by creating tables of supported VOPDY
643 // opcodes per encoding.
644 unsigned VOPDMov = AMDGPU::getVOPDOpcode(AMDGPU::V_MOV_B32_e32, VOPD3);
645 bool CanBeVOPDY = getVOPDFull(VOPDMov, AMDGPU::getVOPDOpcode(Opc, VOPD3),
646 EncodingFamily, VOPD3) != -1;
647 return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};
648 }
649
650 return {false, false};
651}
652
653unsigned getVOPDOpcode(unsigned Opc, bool VOPD3) {
654 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
655 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
656 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
657 return Info ? Info->VOPDOp : ~0u;
658}
659
660bool isVOPD(unsigned Opc) {
661 return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
662}
663
664bool isMAC(unsigned Opc) {
665 return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
666 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
667 Opc == AMDGPU::V_MAC_F32_e64_vi ||
668 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
669 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
670 Opc == AMDGPU::V_MAC_F16_e64_vi ||
671 Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
672 Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
673 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
674 Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
675 Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
676 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
677 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
678 Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
679 Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
680 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
681 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
682 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
683 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
684 Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
685 Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
686 Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
687 Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
688 Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
689}
690
691bool isPermlane16(unsigned Opc) {
692 return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
693 Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
694 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
695 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
696 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
697 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
698 Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
699 Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;
700}

bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc) {
  return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;
}

bool isGenericAtomic(unsigned Opc) {
  return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
}

bool isAsyncStore(unsigned Opc) {
  return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;
}

bool isTensorStore(unsigned Opc) {
  return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
         Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
}

unsigned getTemporalHintType(const MCInstrDesc TID) {
  unsigned Opc = TID.getOpcode();
  // Async and tensor stores should have the temporal hint type TH_TYPE_STORE.
  if (TID.mayStore() &&
      (isAsyncStore(Opc) || isTensorStore(Opc) || !TID.mayLoad()))
    return CPol::TH_TYPE_STORE;

  // This will default to returning TH_TYPE_LOAD when neither MayStore nor
  // MayLoad flag is present, which is the case with instructions like
  // image_get_resinfo.
  return CPol::TH_TYPE_LOAD;
}

bool isTrue16Inst(unsigned Opc) {
  const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
  return Info && Info->IsTrue16;
}

FPType getFPDstSelType(unsigned Opc) {
  const FP4FP8DstByteSelInfo *Info = getFP4FP8DstByteSelHelper(Opc);
  if (!Info)
    return FPType::None;
  if (Info->HasFP8DstByteSel)
    return FPType::FP8;
  if (Info->HasFP4DstByteSel)
    return FPType::FP4;

  return FPType::None;
}

unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
  return Info ? Info->Opcode3Addr : ~0u;
}

unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
  return Info ? Info->Opcode2Addr : ~0u;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

unsigned getBitOp2(unsigned Opc) {
  switch (Opc) {
  default:
    return 0;
  case AMDGPU::V_AND_B32_e32:
    return 0x40;
  case AMDGPU::V_OR_B32_e32:
    return 0x54;
  case AMDGPU::V_XOR_B32_e32:
    return 0x14;
  case AMDGPU::V_XNOR_B32_e32:
    return 0x41;
  }
}

int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
                bool VOPD3) {
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
  OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
  const VOPDInfo *Info =
      getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
  return Info ? Info->Opcode : -1;
}

std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
  const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
  assert(Info);
  const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
  const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
  assert(OpX && OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};
}

namespace VOPD {

ComponentProps::ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout) {
  assert(OpDesc.getNumDefs() == Component::DST_NUM);

  assert(OpDesc.getOperandConstraint(Component::SRC0, MCOI::TIED_TO) == -1);
  assert(OpDesc.getOperandConstraint(Component::SRC1, MCOI::TIED_TO) == -1);
  auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
  assert(TiedIdx == -1 || TiedIdx == Component::DST);
  HasSrc2Acc = TiedIdx != -1;
  Opcode = OpDesc.getOpcode();

  IsVOP3 = VOP3Layout || (OpDesc.TSFlags & SIInstrFlags::VOP3);
  SrcOperandsNum = AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2)   ? 3
                   : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm)  ? 3
                   : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1) ? 2
                                                                           : 1;
  assert(SrcOperandsNum <= Component::MAX_SRC_NUM);

  if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
      Opcode == AMDGPU::V_CNDMASK_B32_e64) {
    // CNDMASK is an awkward exception, it has FP modifiers, but not FP
    // operands.
    NumVOPD3Mods = 2;
    if (IsVOP3)
      SrcOperandsNum = 3;
  } else if (isSISrcFPOperand(OpDesc,
                              getNamedOperandIdx(Opcode, OpName::src0))) {
    // All FP VOPD instructions have Neg modifiers for all operands except
    // for tied src2.
    NumVOPD3Mods = SrcOperandsNum;
    if (HasSrc2Acc)
      --NumVOPD3Mods;
  }

  if (OpDesc.TSFlags & SIInstrFlags::VOP3)
    return;

  auto OperandsNum = OpDesc.getNumOperands();
  unsigned CompOprIdx;
  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
    if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
      MandatoryLiteralIdx = CompOprIdx;
      break;
    }
  }
}

unsigned ComponentProps::getBitOp3OperandIdx() const {
  return getNamedOperandIdx(Opcode, OpName::bitop3);
}

unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
  assert(CompOprIdx < Component::MAX_OPR_NUM);

  if (CompOprIdx == Component::DST)
    return getIndexOfDstInParsedOperands();

  auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
  if (CompSrcIdx < getCompParsedSrcOperandsNum())
    return getIndexOfSrcInParsedOperands(CompSrcIdx);

  // The specified operand does not exist.
  return 0;
}

std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
    std::function<unsigned(unsigned, unsigned)> GetRegIdx,
    const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
    bool VOPD3) const {

  auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx,
                               CompInfo[ComponentIndex::X].isVOP3());
  auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx,
                               CompInfo[ComponentIndex::Y].isVOP3());

  const auto banksOverlap = [&MRI](MCRegister X, MCRegister Y,
                                   unsigned BanksMask) -> bool {
    MCRegister BaseX = MRI.getSubReg(X, AMDGPU::sub0);
    MCRegister BaseY = MRI.getSubReg(Y, AMDGPU::sub0);
    if (!BaseX)
      BaseX = X;
    if (!BaseY)
      BaseY = Y;
    if ((BaseX & BanksMask) == (BaseY & BanksMask))
      return true;
    if (BaseX != X /* This is a 64-bit register */ &&
        ((BaseX + 1) & BanksMask) == (BaseY & BanksMask))
      return true;
    if (BaseY != Y && (BaseX & BanksMask) == ((BaseY + 1) & BanksMask))
      return true;

    // If both are 64-bit, a bank conflict will already be detected while
    // checking the first subreg.
    return false;
  };

  unsigned CompOprIdx;
  for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
    unsigned BanksMasks = VOPD3 ? VOPD3_VGPR_BANK_MASKS[CompOprIdx]
                                : VOPD_VGPR_BANK_MASKS[CompOprIdx];
    if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
      continue;

    if (SkipSrc && CompOprIdx >= Component::DST_NUM)
      continue;

    if (CompOprIdx < Component::DST_NUM) {
      // Even if we do not check vdst parity, vdst operands still shall not
      // overlap.
      if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
        return CompOprIdx;
      if (VOPD3) // No need to check dst parity.
        continue;
    }

    if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
        (!AllowSameVGPR || CompOprIdx < Component::DST_NUM ||
         OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))
      return CompOprIdx;
  }

  return {};
}

// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
// by the specified component. If an operand is unused
// or is not a VGPR, the corresponding value is 0.
//
// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
// for the specified component and MC operand. The callback must return 0
// if the operand is not a register or not a VGPR.
InstInfo::RegIndices
InstInfo::getRegIndices(unsigned CompIdx,
                        std::function<unsigned(unsigned, unsigned)> GetRegIdx,
                        bool VOPD3) const {
  assert(CompIdx < COMPONENTS_NUM);

  const auto &Comp = CompInfo[CompIdx];
  InstInfo::RegIndices RegIndices;

  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());

  for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
    RegIndices[CompOprIdx] =
        Comp.hasRegSrcOperand(CompSrcIdx)
            ? GetRegIdx(CompIdx,
                        Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
            : 0;
  }
  return RegIndices;
}

} // namespace VOPD

VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
  return VOPD::InstInfo(OpX, OpY);
}

VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
                               const MCInstrInfo *InstrInfo) {
  auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);
  bool VOPD3 = InstrInfo->get(VOPDOpcode).TSFlags & SIInstrFlags::VOPD3;
  VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD3);
  VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo, VOPD3);
  return VOPD::InstInfo(OpXInfo, OpYInfo);
}

namespace IsaInfo {

AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
    : STI(STI), XnackSetting(TargetIDSetting::Any),
      SramEccSetting(TargetIDSetting::Any) {
  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
    XnackSetting = TargetIDSetting::Unsupported;
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
    SramEccSetting = TargetIDSetting::Unsupported;
}
1014
1016 // Check if xnack or sramecc is explicitly enabled or disabled. In the
1017 // absence of the target features we assume we must generate code that can run
1018 // in any environment.
1019 SubtargetFeatures Features(FS);
1020 std::optional<bool> XnackRequested;
1021 std::optional<bool> SramEccRequested;
1022
1023 for (const std::string &Feature : Features.getFeatures()) {
1024 if (Feature == "+xnack")
1025 XnackRequested = true;
1026 else if (Feature == "-xnack")
1027 XnackRequested = false;
1028 else if (Feature == "+sramecc")
1029 SramEccRequested = true;
1030 else if (Feature == "-sramecc")
1031 SramEccRequested = false;
1032 }
1033
1034 bool XnackSupported = isXnackSupported();
1035 bool SramEccSupported = isSramEccSupported();
1036
1037 if (XnackRequested) {
1038 if (XnackSupported) {
1039 XnackSetting =
1040 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1041 } else {
1042 // If a specific xnack setting was requested and this GPU does not support
1043 // xnack emit a warning. Setting will remain set to "Unsupported".
1044 if (*XnackRequested) {
1045 errs() << "warning: xnack 'On' was requested for a processor that does "
1046 "not support it!\n";
1047 } else {
1048 errs() << "warning: xnack 'Off' was requested for a processor that "
1049 "does not support it!\n";
1050 }
1051 }
1052 }
1053
1054 if (SramEccRequested) {
1055 if (SramEccSupported) {
1056 SramEccSetting =
1057 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1058 } else {
1059 // If a specific sramecc setting was requested and this GPU does not
1060 // support sramecc emit a warning. Setting will remain set to
1061 // "Unsupported".
1062 if (*SramEccRequested) {
1063 errs() << "warning: sramecc 'On' was requested for a processor that "
1064 "does not support it!\n";
1065 } else {
1066 errs() << "warning: sramecc 'Off' was requested for a processor that "
1067 "does not support it!\n";
1068 }
1069 }
1070 }
1071}

static TargetIDSetting
getTargetIDSettingFromFeatureString(StringRef FeatureString) {
  if (FeatureString.ends_with("-"))
    return TargetIDSetting::Off;
  if (FeatureString.ends_with("+"))
    return TargetIDSetting::On;

  llvm_unreachable("Malformed feature string");
}

void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.starts_with("xnack"))
      XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
    if (FeatureString.starts_with("sramecc"))
      SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
  }
}

std::string AMDGPUTargetID::toString() const {
  std::string StringRep;
  raw_string_ostream StreamRep(StringRep);

  auto TargetTriple = STI.getTargetTriple();
  auto Version = getIsaVersion(STI.getCPU());

  StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
            << '-' << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  // TODO: Following else statement is present here because we used various
  // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803').
  // Remove once all aliases are removed from GCNProcessors.td.
  if (Version.Major >= 9)
    Processor = STI.getCPU().str();
  else
    Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
                 Twine(Version.Stepping))
                    .str();

  std::string Features;
  if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
    // sramecc.
    if (getSramEccSetting() == TargetIDSetting::Off)
      Features += ":sramecc-";
    else if (getSramEccSetting() == TargetIDSetting::On)
      Features += ":sramecc+";
    // xnack.
    if (getXnackSetting() == TargetIDSetting::Off)
      Features += ":xnack-";
    else if (getXnackSetting() == TargetIDSetting::On)
      Features += ":xnack+";
  }

  StreamRep << Processor << Features;

  return StringRep;
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  unsigned BytesPerCU = getAddressableLocalMemorySize(STI);

  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share". So the effective local memory size is doubled in
  // WGP mode on gfx10.
  if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
    BytesPerCU *= 2;

  return BytesPerCU;
}
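
// Example (illustrative): a gfx10 part with 64 KiB of addressable LDS per CU
// reports 64 KiB here in CU mode but 128 KiB in WGP mode, because the two
// CUs of a WGP share their local memory.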

unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
    return 65536;
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
    return 163840;
  if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
    return 327680;
  return 32768;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share".

  // GFX12.5 only supports CU mode, which contains four SIMDs.
  if (isGFX1250(*STI)) {
    assert(STI->getFeatureBits().test(FeatureCuMode));
    return 4;
  }

  // For gfx10 in CU mode the functional block is the CU, which contains
  // two SIMDs.
  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
    return 2;

  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP
  // contains two CUs, so a total of four SIMDs.
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (!STI->getTargetTriple().isAMDGCN())
    return 8;
  unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1) {
    // Single-wave workgroups don't consume barrier resources.
    return MaxWaves;
  }

  unsigned MaxBarriers = 16;
  if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
    MaxBarriers = 32;

  return std::min(MaxWaves / N, MaxBarriers);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
  // FIXME: Need to take scratch memory into account.
  if (isGFX90A(*STI))
    return 8;
  if (!isGFX10Plus(*STI))
    return 10;
  return hasGFX10_3Insts(*STI) ? 16 : 20;
}

unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
                    getEUsPerCU(STI));
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  // Some subtargets allow encoding 2048, but this isn't tested or supported.
  return 1024;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
}
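
// Example (illustrative): a flat workgroup size of 256 yields
// divideCeil(256, 64) = 4 waves per workgroup on a wave64 target and 8 on a
// wave32 target.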

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed ||
        STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs,
                                               unsigned Granule) {
  return divideCeil(std::max(1u, NumRegs), Granule);
}
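
// Example (illustrative): 10 registers with a granule of 4 occupy
// divideCeil(10, 4) = 3 blocks; NumRegs = 0 is clamped to 1 and still
// occupies one block.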

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return getGranulatedNumRegisterBlocks(NumSGPRs, getSGPREncodingGranule(STI)) -
         1;
}

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             unsigned DynamicVGPRBlockSize,
                             std::optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  if (DynamicVGPRBlockSize != 0)
    return DynamicVGPRBlockSize;

  // Temporarily check the subtarget feature, until we fully switch to using
  // attributes.
  if (STI->getFeatureBits().test(FeatureDynamicVGPR))
    return STI->getFeatureBits().test(FeatureDynamicVGPRBlockSize32) ? 32 : 16;

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
                      : STI->getFeatureBits().test(FeatureWavefrontSize32);

  if (STI->getFeatureBits().test(Feature1_5xVGPRs))
    return IsWave32 ? 24 : 12;

  if (hasGFX10_3Insts(*STI))
    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                std::optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
                      : STI->getFeatureBits().test(FeatureWavefrontSize32);

  return IsWave32 ? 8 : 4;
}

unsigned getArchVGPRAllocGranule() { return 4; }

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  if (!isGFX10Plus(*STI))
    return 256;
  bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
  if (STI->getFeatureBits().test(Feature1_5xVGPRs))
    return IsWave32 ? 1536 : 768;
  return IsWave32 ? 1024 : 512;
}

unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI) { return 256; }

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
                                unsigned DynamicVGPRBlockSize) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;

  // Temporarily check the subtarget feature, until we fully switch to using
  // attributes.
  if (DynamicVGPRBlockSize != 0 ||
      STI->getFeatureBits().test(FeatureDynamicVGPR))
    // On GFX12 we can allocate at most 8 blocks of VGPRs.
    return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
  return getAddressableNumArchVGPRs(STI);
}

unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
                                      unsigned NumVGPRs,
                                      unsigned DynamicVGPRBlockSize) {
  return getNumWavesPerEUWithNumVGPRs(
      NumVGPRs, getVGPRAllocGranule(STI, DynamicVGPRBlockSize),
      getMaxWavesPerEU(STI), getTotalNumVGPRs(STI));
}

unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
                                      unsigned MaxWaves,
                                      unsigned TotalNumVGPRs) {
  if (NumVGPRs < Granule)
    return MaxWaves;
  unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
  return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
}
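
// Example (illustrative): with Granule = 8, MaxWaves = 10 and
// TotalNumVGPRs = 512, a kernel using 100 VGPRs rounds up to 104 and gets
// min(512 / 104, 10) = 4 waves per EU.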

unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
                                  AMDGPUSubtarget::Generation Gen) {
  if (Gen >= AMDGPUSubtarget::GFX10)
    return MaxWaves;

  if (Gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    if (SGPRs <= 80)
      return 10;
    if (SGPRs <= 88)
      return 9;
    if (SGPRs <= 100)
      return 8;
    return 7;
  }
  if (SGPRs <= 48)
    return 10;
  if (SGPRs <= 56)
    return 9;
  if (SGPRs <= 64)
    return 8;
  if (SGPRs <= 72)
    return 7;
  if (SGPRs <= 80)
    return 6;
  return 5;
}
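
// Example (illustrative): on a pre-gfx8 target, 60 SGPRs fall into the
// "<= 64" bucket and limit occupancy to 8 waves per EU, while 48 SGPRs
// would still allow 10.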

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        unsigned DynamicVGPRBlockSize) {
  assert(WavesPerEU != 0);

  unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
  if (WavesPerEU >= MaxWavesPerEU)
    return 0;

  unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
  unsigned AddrsableNumVGPRs =
      getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
  unsigned Granule = getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
  unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);

  if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
    return 0;

  unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs,
                                                        DynamicVGPRBlockSize);
  if (WavesPerEU < MinWavesPerEU)
    return getMinNumVGPRs(STI, MinWavesPerEU, DynamicVGPRBlockSize);

  unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
  unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
  return std::min(MinNumVGPRs, AddrsableNumVGPRs);
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        unsigned DynamicVGPRBlockSize) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                getVGPRAllocGranule(STI, DynamicVGPRBlockSize));
  unsigned AddressableNumVGPRs =
      getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                                 std::optional<bool> EnableWavefrontSize32) {
  return getGranulatedNumRegisterBlocks(
             NumVGPRs, getVGPREncodingGranule(STI, EnableWavefrontSize32)) -
         1;
}

unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
                                   unsigned NumVGPRs,
                                   unsigned DynamicVGPRBlockSize,
                                   std::optional<bool> EnableWavefrontSize32) {
  return getGranulatedNumRegisterBlocks(
      NumVGPRs,
      getVGPRAllocGranule(STI, DynamicVGPRBlockSize, EnableWavefrontSize32));
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  KernelCode.amd_kernel_code_version_major = 1;
  KernelCode.amd_kernel_code_version_minor = 2;
  KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  KernelCode.amd_machine_version_major = Version.Major;
  KernelCode.amd_machine_version_minor = Version.Minor;
  KernelCode.amd_machine_version_stepping = Version.Stepping;
  KernelCode.kernel_code_entry_byte_offset = sizeof(amd_kernel_code_t);
  if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
    KernelCode.wavefront_size = 5;
    KernelCode.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
  } else {
    KernelCode.wavefront_size = 6;
  }

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  KernelCode.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  KernelCode.kernarg_segment_alignment = 4;
  KernelCode.group_segment_alignment = 4;
  KernelCode.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    KernelCode.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  unsigned AS = GV->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getArch() == Triple::r600;
}

static bool isValidRegPrefix(char C) {
  return C == 'v' || C == 's' || C == 'a';
}

std::tuple<char, unsigned, unsigned>
parseAsmConstraintPhysReg(StringRef Constraint) {
  StringRef RegName = Constraint;
  if (!RegName.consume_front("{") || !RegName.consume_back("}"))
    return {};

  char Kind = RegName.front();
  if (!isValidRegPrefix(Kind))
    return {};

  RegName = RegName.drop_front();
  if (RegName.consume_front("[")) {
    unsigned Idx, End;
    bool Failed = RegName.consumeInteger(10, Idx);
    Failed |= !RegName.consume_front(":");
    Failed |= RegName.consumeInteger(10, End);
    Failed |= !RegName.consume_back("]");
    if (!Failed) {
      unsigned NumRegs = End - Idx + 1;
      if (NumRegs > 1)
        return {Kind, Idx, NumRegs};
    }
  } else {
    unsigned Idx;
    bool Failed = RegName.getAsInteger(10, Idx);
    if (!Failed)
      return {Kind, Idx, 1};
  }

  return {};
}

std::pair<unsigned, unsigned>
getIntegerPairAttribute(const Function &F, StringRef Name,
                        std::pair<unsigned, unsigned> Default,
                        bool OnlyFirstRequired) {
  if (auto Attr = getIntegerPairAttribute(F, Name, OnlyFirstRequired))
    return {Attr->first, Attr->second.value_or(Default.second)};
  return Default;
}

std::optional<std::pair<unsigned, std::optional<unsigned>>>
getIntegerPairAttribute(const Function &F, StringRef Name,
                        bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return std::nullopt;

  LLVMContext &Ctx = F.getContext();
  std::pair<unsigned, std::optional<unsigned>> Ints;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return std::nullopt;
  }
  unsigned Second = 0;
  if (Strs.second.trim().getAsInteger(0, Second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return std::nullopt;
    }
  } else {
    Ints.second = Second;
  }

  return Ints;
}
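
// Usage sketch (illustrative): an attribute such as
// "amdgpu-flat-work-group-size"="1,256" parses to the pair {1, 256}; with
// OnlyFirstRequired set, a value of "128" parses to {128, std::nullopt}.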

SmallVector<unsigned> getIntegerVecAttribute(const Function &F, StringRef Name,
                                             unsigned Size,
                                             unsigned DefaultVal) {
  std::optional<SmallVector<unsigned>> R =
      getIntegerVecAttribute(F, Name, Size);
  return R.has_value() ? *R : SmallVector<unsigned>(Size, DefaultVal);
}

std::optional<SmallVector<unsigned>>
getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size) {
  assert(Size > 2);
  LLVMContext &Ctx = F.getContext();

  Attribute A = F.getFnAttribute(Name);
  if (!A.isValid())
    return std::nullopt;
  if (!A.isStringAttribute()) {
    Ctx.emitError(Name + " is not a string attribute");
    return std::nullopt;
  }

  SmallVector<unsigned> Vals(Size);

  StringRef S = A.getValueAsString();
  unsigned i = 0;
  for (; !S.empty() && i < Size; i++) {
    std::pair<StringRef, StringRef> Strs = S.split(',');
    unsigned IntVal;
    if (Strs.first.trim().getAsInteger(0, IntVal)) {
      Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
                    Name);
      return std::nullopt;
    }
    Vals[i] = IntVal;
    S = Strs.second;
  }

  if (!S.empty() || i < Size) {
    Ctx.emitError("attribute " + Name +
                  " has incorrect number of integers; expected " +
                  llvm::utostr(Size));
    return std::nullopt;
  }
  return Vals;
}
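
// Usage sketch (illustrative): with Size = 3, an attribute value of
// "4,8,16" parses to {4, 8, 16}, while "4,8" or "4,8,16,32" triggers the
// "incorrect number of integers" error.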

bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val) {
  assert((MD.getNumOperands() % 2 == 0) && "invalid number of operands!");
  for (unsigned I = 0, E = MD.getNumOperands() / 2; I != E; ++I) {
    auto Low =
        mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 0))->getValue();
    auto High =
        mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 1))->getValue();
    // There are two types of [A; B) ranges:
    // A < B, e.g. [4; 5) which is a range that only includes 4.
    // A > B, e.g. [5; 4) which is a range that wraps around and includes
    // everything except 4.
    if (Low.ult(High)) {
      if (Low.ule(Val) && High.ugt(Val))
        return true;
    } else {
      if (Low.ule(Val) || High.ugt(Val))
        return true;
    }
  }

  return false;
}
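
// Example (illustrative): the metadata pairs are [Low; High) ranges, so
// !{i32 4, i32 5} matches only the value 4, while the wrapped range
// !{i32 5, i32 4} matches every value except 4.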

unsigned getVmcntBitMask(const IsaVersion &Version) {
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;
}

unsigned getLoadcntBitMask(const IsaVersion &Version) {
  return (1 << getLoadcntBitWidth(Version.Major)) - 1;
}

unsigned getSamplecntBitMask(const IsaVersion &Version) {
  return (1 << getSamplecntBitWidth(Version.Major)) - 1;
}

unsigned getBvhcntBitMask(const IsaVersion &Version) {
  return (1 << getBvhcntBitWidth(Version.Major)) - 1;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth(Version.Major)) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getDscntBitMask(const IsaVersion &Version) {
  return (1 << getDscntBitWidth(Version.Major)) - 1;
}

unsigned getKmcntBitMask(const IsaVersion &Version) {
  return (1 << getKmcntBitWidth(Version.Major)) - 1;
}

unsigned getXcntBitMask(const IsaVersion &Version) {
  return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
}

unsigned getStorecntBitMask(const IsaVersion &Version) {
  return (1 << getStorecntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
}
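
// Worked example (illustrative): on gfx9 a waitcnt encoding of 0xC00F
// decodes to VmcntLo = 0xF and VmcntHi = 3, i.e. vmcnt = (3 << 4) | 0xF = 63.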

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
                    getExpcntBitWidth(Version.Major));
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
                   unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.LoadCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.DsCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
                     getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
                       unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
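
// Usage sketch (illustrative): encodeWaitcnt starts from the all-ones
// getWaitcntBitMask value, so any counter left at its bit mask means "no
// wait" for that counter; encoding Vmcnt = 0 with Expcnt and Lgkmcnt at
// their maxima waits only on outstanding vector memory operations.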

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.LoadCnt, Decoded.ExpCnt, Decoded.DsCnt);
}

static unsigned getCombinedCountBitMask(const IsaVersion &Version,
                                        bool IsStore) {
  unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
                              getDscntBitWidth(Version.Major));
  if (IsStore) {
    unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                   getStorecntBitWidth(Version.Major));
    return Dscnt | Storecnt;
  }
  unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                getLoadcntBitWidth(Version.Major));
  return Dscnt | Loadcnt;
}

Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {
  Waitcnt Decoded;
  Decoded.LoadCnt =
      unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
                 getLoadcntBitWidth(Version.Major));
  Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
                             getDscntBitWidth(Version.Major));
  return Decoded;
}

Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt) {
  Waitcnt Decoded;
  Decoded.StoreCnt =
      unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
                 getStorecntBitWidth(Version.Major));
  Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
                             getDscntBitWidth(Version.Major));
  return Decoded;
}

static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt,
                              unsigned Loadcnt) {
  return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getLoadcntBitWidth(Version.Major));
}

static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt,
                               unsigned Storecnt) {
  return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getStorecntBitWidth(Version.Major));
}

static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt,
                            unsigned Dscnt) {
  return packBits(Dscnt, Waitcnt, getDscntBitShift(Version.Major),
                  getDscntBitWidth(Version.Major));
}

static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt,
                                   unsigned Dscnt) {
  unsigned Waitcnt = getCombinedCountBitMask(Version, false);
  Waitcnt = encodeLoadcnt(Version, Waitcnt, Loadcnt);
  Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
  return Waitcnt;
}

unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeLoadcntDscnt(Version, Decoded.LoadCnt, Decoded.DsCnt);
}

static unsigned encodeStorecntDscnt(const IsaVersion &Version,
                                    unsigned Storecnt, unsigned Dscnt) {
  unsigned Waitcnt = getCombinedCountBitMask(Version, true);
  Waitcnt = encodeStorecnt(Version, Waitcnt, Storecnt);
  Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
  return Waitcnt;
}

unsigned encodeStorecntDscnt(const IsaVersion &Version,
                             const Waitcnt &Decoded) {
  return encodeStorecntDscnt(Version, Decoded.StoreCnt, Decoded.DsCnt);
}

//===----------------------------------------------------------------------===//
// Custom Operand Values
//===----------------------------------------------------------------------===//

static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
                                                int Size,
                                                const MCSubtargetInfo &STI) {
  unsigned Enc = 0;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.isSupported(STI))
      Enc |= Op.encode(Op.Default);
  }
  return Enc;
}

static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
                                            int Size, unsigned Code,
                                            bool &HasNonDefaultVal,
                                            const MCSubtargetInfo &STI) {
  unsigned UsedOprMask = 0;
  HasNonDefaultVal = false;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (!Op.isSupported(STI))
      continue;
    UsedOprMask |= Op.getMask();
    unsigned Val = Op.decode(Code);
    if (!Op.isValid(Val))
      return false;
    HasNonDefaultVal |= (Val != Op.Default);
  }
  return (Code & ~UsedOprMask) == 0;
}

static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
                                unsigned Code, int &Idx, StringRef &Name,
                                unsigned &Val, bool &IsDefault,
                                const MCSubtargetInfo &STI) {
  while (Idx < Size) {
    const auto &Op = Opr[Idx++];
    if (Op.isSupported(STI)) {
      Name = Op.Name;
      Val = Op.decode(Code);
      IsDefault = (Val == Op.Default);
      return true;
    }
  }

  return false;
}
1953
1954static int encodeCustomOperandVal(const CustomOperandVal &Op,
1955 int64_t InputVal) {
1956 if (InputVal < 0 || InputVal > Op.Max)
1957 return OPR_VAL_INVALID;
1958 return Op.encode(InputVal);
1959}
1960
1961static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1962 const StringRef Name, int64_t InputVal,
1963 unsigned &UsedOprMask,
1964 const MCSubtargetInfo &STI) {
1965 int InvalidId = OPR_ID_UNKNOWN;
1966 for (int Idx = 0; Idx < Size; ++Idx) {
1967 const auto &Op = Opr[Idx];
1968 if (Op.Name == Name) {
1969 if (!Op.isSupported(STI)) {
1970 InvalidId = OPR_ID_UNSUPPORTED;
1971 continue;
1972 }
1973 auto OprMask = Op.getMask();
1974 if (OprMask & UsedOprMask)
1975 return OPR_ID_DUPLICATE;
1976 UsedOprMask |= OprMask;
1977 return encodeCustomOperandVal(Op, InputVal);
1978 }
1979 }
1980 return InvalidId;
1981}
1982
1983//===----------------------------------------------------------------------===//
1984// DepCtr
1985//===----------------------------------------------------------------------===//
1986
1987namespace DepCtr {
1988
1989int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1990 static int Default = -1;
1991 if (Default == -1)
1992 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
1993 return Default;
1994}
1995
1996bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1997 const MCSubtargetInfo &STI) {
1998 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
1999 HasNonDefaultVal, STI);
2000}
2001
2002bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
2003 bool &IsDefault, const MCSubtargetInfo &STI) {
2004 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
2005 IsDefault, STI);
2006}
2007
2008int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
2009 const MCSubtargetInfo &STI) {
2010 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
2011 STI);
2012}
2013
2014unsigned decodeFieldVmVsrc(unsigned Encoded) {
2015 return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2016}
2017
2018unsigned decodeFieldVaVdst(unsigned Encoded) {
2019 return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2020}
2021
2022unsigned decodeFieldSaSdst(unsigned Encoded) {
2023 return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2024}
2025
2026unsigned decodeFieldVaSdst(unsigned Encoded) {
2027 return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2028}
2029
2030unsigned decodeFieldVaVcc(unsigned Encoded) {
2031 return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());
2032}
2033
2034unsigned decodeFieldVaSsrc(unsigned Encoded) {
2035 return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2036}
2037
2038unsigned decodeFieldHoldCnt(unsigned Encoded) {
2039 return unpackBits(Encoded, getHoldCntBitShift(), getHoldCntWidth());
2040}
2041
2042unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
2043 return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2044}
2045
2046unsigned encodeFieldVmVsrc(unsigned VmVsrc) {
2047 return encodeFieldVmVsrc(0xffff, VmVsrc);
2048}
2049
2050unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
2051 return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2052}
2053
2054unsigned encodeFieldVaVdst(unsigned VaVdst) {
2055 return encodeFieldVaVdst(0xffff, VaVdst);
2056}
2057
2058unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
2059 return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2060}
2061
2062unsigned encodeFieldSaSdst(unsigned SaSdst) {
2063 return encodeFieldSaSdst(0xffff, SaSdst);
2064}
2065
2066unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst) {
2067 return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2068}
2069
2070unsigned encodeFieldVaSdst(unsigned VaSdst) {
2071 return encodeFieldVaSdst(0xffff, VaSdst);
2072}
2073
2074unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc) {
2075 return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());
2076}
2077
2078unsigned encodeFieldVaVcc(unsigned VaVcc) {
2079 return encodeFieldVaVcc(0xffff, VaVcc);
2080}
2081
2082unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc) {
2083 return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2084}
2085
2086unsigned encodeFieldVaSsrc(unsigned VaSsrc) {
2087 return encodeFieldVaSsrc(0xffff, VaSsrc);
2088}
2089
2090unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt) {
2091 return packBits(HoldCnt, Encoded, getHoldCntBitShift(), getHoldCntWidth());
2092}
2093
2094unsigned encodeFieldHoldCnt(unsigned HoldCnt) {
2095 return encodeFieldHoldCnt(0xffff, HoldCnt);
2096}
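// Illustrative sketch (editorial addition, not in the upstream file): the
// single-argument encodeField* overloads start from 0xffff, i.e. every
// counter at its "no wait" maximum, and clear only their own field, so
// independent waits compose by chaining the two-argument forms:
//   unsigned Enc = encodeFieldVmVsrc(0xffff, 0); // wait on all VMEM sources
//   Enc = encodeFieldVaVdst(Enc, 0);             // and on all VALU dests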
2097
2098} // namespace DepCtr
2099
2100//===----------------------------------------------------------------------===//
2101// exp tgt
2102//===----------------------------------------------------------------------===//
2103
2104namespace Exp {
2105
2106struct ExpTgt {
2107 StringLiteral Name;
2108 unsigned Tgt;
2109 unsigned MaxIndex;
2110};
2111
2112// clang-format off
2113static constexpr ExpTgt ExpTgtInfo[] = {
2114 {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
2115 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
2116 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
2117 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
2118 {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
2119 {{"dual_src_blend"},ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
2120 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
2121};
2122// clang-format on
2123
2124bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
2125 for (const ExpTgt &Val : ExpTgtInfo) {
2126 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
2127 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
2128 Name = Val.Name;
2129 return true;
2130 }
2131 }
2132 return false;
2133}
2134
2135unsigned getTgtId(const StringRef Name) {
2136
2137 for (const ExpTgt &Val : ExpTgtInfo) {
2138 if (Val.MaxIndex == 0 && Name == Val.Name)
2139 return Val.Tgt;
2140
2141 if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
2142 StringRef Suffix = Name.drop_front(Val.Name.size());
2143
2144 unsigned Id;
2145 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
2146 return ET_INVALID;
2147
2148 // Reject leading zeroes
2149 if (Suffix.size() > 1 && Suffix[0] == '0')
2150 return ET_INVALID;
2151
2152 return Val.Tgt + Id;
2153 }
2154 }
2155 return ET_INVALID;
2156}
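// Illustrative sketch (editorial addition, not in the upstream file):
// getTgtId() accepts a bare name for single targets and name+index for
// ranged targets, rejecting out-of-range indices and leading zeroes:
//   getTgtId("prim");  // ET_PRIM
//   getTgtId("pos3");  // ET_POS0 + 3
//   getTgtId("pos03"); // ET_INVALID (leading zero)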
2157
2158bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
2159 switch (Id) {
2160 case ET_NULL:
2161 return !isGFX11Plus(STI);
2162 case ET_POS4:
2163 case ET_PRIM:
2164 return isGFX10Plus(STI);
2165 case ET_DUAL_SRC_BLEND0:
2166 case ET_DUAL_SRC_BLEND1:
2167 return isGFX11Plus(STI);
2168 default:
2169 if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
2170 return !isGFX11Plus(STI);
2171 return true;
2172 }
2173}
2174
2175} // namespace Exp
2176
2177//===----------------------------------------------------------------------===//
2178// MTBUF Format
2179//===----------------------------------------------------------------------===//
2180
2181namespace MTBUFFormat {
2182
2183int64_t getDfmt(const StringRef Name) {
2184 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
2185 if (Name == DfmtSymbolic[Id])
2186 return Id;
2187 }
2188 return DFMT_UNDEF;
2189}
2190
2191StringRef getDfmtName(unsigned Id) {
2192 assert(Id <= DFMT_MAX);
2193 return DfmtSymbolic[Id];
2194}
2195
2196static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
2197 if (isSI(STI) || isCI(STI))
2198 return NfmtSymbolicSICI;
2199 if (isVI(STI) || isGFX9(STI))
2200 return NfmtSymbolicVI;
2201 return NfmtSymbolicGFX10;
2202}
2203
2204int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
2205 const auto *lookupTable = getNfmtLookupTable(STI);
2206 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
2207 if (Name == lookupTable[Id])
2208 return Id;
2209 }
2210 return NFMT_UNDEF;
2211}
2212
2213StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
2214 assert(Id <= NFMT_MAX);
2215 return getNfmtLookupTable(STI)[Id];
2216}
2217
2218bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2219 unsigned Dfmt;
2220 unsigned Nfmt;
2221 decodeDfmtNfmt(Id, Dfmt, Nfmt);
2222 return isValidNfmt(Nfmt, STI);
2223}
2224
2225bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2226 return !getNfmtName(Id, STI).empty();
2227}
2228
2229int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
2230 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
2231}
2232
2233void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
2234 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
2235 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
2236}
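// Illustrative sketch (editorial addition, not in the upstream file): dfmt
// and nfmt occupy disjoint bit ranges of the pre-GFX10 format field, so the
// pair round-trips losslessly:
//   unsigned Dfmt, Nfmt;
//   decodeDfmtNfmt(encodeDfmtNfmt(DFMT_MAX, NFMT_MAX), Dfmt, Nfmt);
//   // Dfmt == DFMT_MAX, Nfmt == NFMT_MAX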
2237
2238int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
2239 if (isGFX11Plus(STI)) {
2240 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2241 if (Name == UfmtSymbolicGFX11[Id])
2242 return Id;
2243 }
2244 } else {
2245 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2246 if (Name == UfmtSymbolicGFX10[Id])
2247 return Id;
2248 }
2249 }
2250 return UFMT_UNDEF;
2251}
2252
2253StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
2254 if (isValidUnifiedFormat(Id, STI))
2255 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
2256 return "";
2257}
2258
2259bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
2260 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
2261}
2262
2263int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
2264 const MCSubtargetInfo &STI) {
2265 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
2266 if (isGFX11Plus(STI)) {
2267 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2268 if (Fmt == DfmtNfmt2UFmtGFX11[Id])
2269 return Id;
2270 }
2271 } else {
2272 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2273 if (Fmt == DfmtNfmt2UFmtGFX10[Id])
2274 return Id;
2275 }
2276 }
2277 return UFMT_UNDEF;
2278}
2279
2280bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
2281 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
2282}
2283
2284unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
2285 if (isGFX10Plus(STI))
2286 return UFMT_DEFAULT;
2287 return DFMT_NFMT_DEFAULT;
2288}
2289
2290} // namespace MTBUFFormat
2291
2292//===----------------------------------------------------------------------===//
2293// SendMsg
2294//===----------------------------------------------------------------------===//
2295
2296namespace SendMsg {
2297
2298static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
2299 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
2300}
2301
2302bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
2303 return (MsgId & ~(getMsgIdMask(STI))) == 0;
2304}
2305
2306bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
2307 bool Strict) {
2308 assert(isValidMsgId(MsgId, STI));
2309
2310 if (!Strict)
2311 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
2312
2313 if (msgRequiresOp(MsgId, STI)) {
2314 if (MsgId == ID_GS_PreGFX11 && OpId == OP_GS_NOP)
2315 return false;
2316
2317 return !getMsgOpName(MsgId, OpId, STI).empty();
2318 }
2319
2320 return OpId == OP_NONE_;
2321}
2322
2323bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
2324 const MCSubtargetInfo &STI, bool Strict) {
2325 assert(isValidMsgOp(MsgId, OpId, STI, Strict));
2326
2327 if (!Strict)
2328 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
2329
2330 if (!isGFX11Plus(STI)) {
2331 switch (MsgId) {
2332 case ID_GS_PreGFX11:
2333 case ID_GS_DONE_PreGFX11:
2335 return (OpId == OP_GS_NOP)
2336 ? (StreamId == STREAM_ID_NONE_)
2337 : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
2338 }
2339 }
2340 return StreamId == STREAM_ID_NONE_;
2341}
2342
2343bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
2344 return MsgId == ID_SYSMSG ||
2345 (!isGFX11Plus(STI) &&
2346 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
2347}
2348
2349bool msgSupportsStream(int64_t MsgId, int64_t OpId,
2350 const MCSubtargetInfo &STI) {
2351 return !isGFX11Plus(STI) &&
2352 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
2353 OpId != OP_GS_NOP;
2354}
2355
2356void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
2357 uint16_t &StreamId, const MCSubtargetInfo &STI) {
2358 MsgId = Val & getMsgIdMask(STI);
2359 if (isGFX11Plus(STI)) {
2360 OpId = 0;
2361 StreamId = 0;
2362 } else {
2363 OpId = (Val & OP_MASK_) >> OP_SHIFT_;
2364 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
2365 }
2366}
2367
2368uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
2369 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
2370}
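// Illustrative sketch (editorial addition, not in the upstream file): given
// an MCSubtargetInfo STI for a pre-GFX11 target, a sendmsg immediate
// round-trips through encodeMsg()/decodeMsg():
//   uint16_t MsgId, OpId, StreamId;
//   uint64_t Val = encodeMsg(ID_GS_PreGFX11, OP_GS_EMIT, 2);
//   decodeMsg(Val, MsgId, OpId, StreamId, STI); // recovers all three fields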
2371
2372} // namespace SendMsg
2373
2374//===----------------------------------------------------------------------===//
2375//
2376//===----------------------------------------------------------------------===//
2377
2378unsigned getInitialPSInputAddr(const Function &F) {
2379 return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
2380}
2381
2382bool getHasColorExport(const Function &F) {
2383 // As a safe default always respond as if PS has color exports.
2384 return F.getFnAttributeAsParsedInteger(
2385 "amdgpu-color-export",
2386 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
2387}
2388
2389bool getHasDepthExport(const Function &F) {
2390 return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
2391}
2392
2393unsigned getDynamicVGPRBlockSize(const Function &F) {
2394 unsigned BlockSize =
2395 F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
2396
2397 if (BlockSize == 16 || BlockSize == 32)
2398 return BlockSize;
2399
2400 return 0;
2401}
2402
2403bool hasXNACK(const MCSubtargetInfo &STI) {
2404 return STI.hasFeature(AMDGPU::FeatureXNACK);
2405}
2406
2407bool hasSRAMECC(const MCSubtargetInfo &STI) {
2408 return STI.hasFeature(AMDGPU::FeatureSRAMECC);
2409}
2410
2411bool hasMIMG_R128(const MCSubtargetInfo &STI) {
2412 return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
2413 !STI.hasFeature(AMDGPU::FeatureR128A16);
2414}
2415
2416bool hasA16(const MCSubtargetInfo &STI) {
2417 return STI.hasFeature(AMDGPU::FeatureA16);
2418}
2419
2420bool hasG16(const MCSubtargetInfo &STI) {
2421 return STI.hasFeature(AMDGPU::FeatureG16);
2422}
2423
2424bool hasPackedD16(const MCSubtargetInfo &STI) {
2425 return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2426 !isSI(STI);
2427}
2428
2429bool hasGDS(const MCSubtargetInfo &STI) {
2430 return STI.hasFeature(AMDGPU::FeatureGDS);
2431}
2432
2433unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler) {
2434 auto Version = getIsaVersion(STI.getCPU());
2435 if (Version.Major == 10)
2436 return Version.Minor >= 3 ? 13 : 5;
2437 if (Version.Major == 11)
2438 return 5;
2439 if (Version.Major >= 12)
2440 return HasSampler ? 4 : 5;
2441 return 0;
2442}
2443
2444unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI) {
2445 if (isGFX1250(STI))
2446 return 32;
2447 return 16;
2448}
2449
2450bool isSI(const MCSubtargetInfo &STI) {
2451 return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2452}
2453
2454bool isCI(const MCSubtargetInfo &STI) {
2455 return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2456}
2457
2458bool isVI(const MCSubtargetInfo &STI) {
2459 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2460}
2461
2462bool isGFX9(const MCSubtargetInfo &STI) {
2463 return STI.hasFeature(AMDGPU::FeatureGFX9);
2464}
2465
2466bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2467 return isGFX9(STI) || isGFX10(STI);
2468}
2469
2470bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI) {
2471 return isGFX9(STI) || isGFX10(STI) || isGFX11(STI);
2472}
2473
2474bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2475 return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2476}
2477
2478bool isGFX8Plus(const MCSubtargetInfo &STI) {
2479 return isVI(STI) || isGFX9Plus(STI);
2480}
2481
2482bool isGFX9Plus(const MCSubtargetInfo &STI) {
2483 return isGFX9(STI) || isGFX10Plus(STI);
2484}
2485
2486bool isNotGFX9Plus(const MCSubtargetInfo &STI) { return !isGFX9Plus(STI); }
2487
2488bool isGFX10(const MCSubtargetInfo &STI) {
2489 return STI.hasFeature(AMDGPU::FeatureGFX10);
2490}
2491
2492bool isGFX10_GFX11(const MCSubtargetInfo &STI) {
2493 return isGFX10(STI) || isGFX11(STI);
2494}
2495
2496bool isGFX10Plus(const MCSubtargetInfo &STI) {
2497 return isGFX10(STI) || isGFX11Plus(STI);
2498}
2499
2500bool isGFX11(const MCSubtargetInfo &STI) {
2501 return STI.hasFeature(AMDGPU::FeatureGFX11);
2502}
2503
2504bool isGFX11Plus(const MCSubtargetInfo &STI) {
2505 return isGFX11(STI) || isGFX12Plus(STI);
2506}
2507
2508bool isGFX12(const MCSubtargetInfo &STI) {
2509 return STI.getFeatureBits()[AMDGPU::FeatureGFX12];
2510}
2511
2512bool isGFX12Plus(const MCSubtargetInfo &STI) { return isGFX12(STI); }
2513
2514bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
2515
2516bool isGFX1250(const MCSubtargetInfo &STI) {
2517 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts];
2518}
2519
2521 if (isGFX1250(STI))
2522 return false;
2523 return isGFX10Plus(STI);
2524}
2525
2526bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
2527
2528bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2529 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2530}
2531
2532bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2533 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2534}
2535
2536bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2537 return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2538}
2539
2540bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2541 return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2542}
2543
2544bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2545 return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2546}
2547
2548bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2549 return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2550}
2551
2552bool isGFX10_3_GFX11(const MCSubtargetInfo &STI) {
2553 return isGFX10_BEncoding(STI) && !isGFX12Plus(STI);
2554}
2555
2556bool isGFX90A(const MCSubtargetInfo &STI) {
2557 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2558}
2559
2560bool isGFX940(const MCSubtargetInfo &STI) {
2561 return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2562}
2563
2564bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2565 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2566}
2567
2568bool hasMAIInsts(const MCSubtargetInfo &STI) {
2569 return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2570}
2571
2572bool hasVOPD(const MCSubtargetInfo &STI) {
2573 return STI.hasFeature(AMDGPU::FeatureVOPD);
2574}
2575
2576bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI) {
2577 return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);
2578}
2579
2580bool hasKernargPreload(const MCSubtargetInfo &STI) {
2581 return STI.hasFeature(AMDGPU::FeatureKernargPreload);
2582}
2583
2584int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2585 int32_t ArgNumVGPR) {
2586 if (has90AInsts && ArgNumAGPR)
2587 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2588 return std::max(ArgNumVGPR, ArgNumAGPR);
2589}
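// Worked example (editorial addition, not in the upstream file): with gfx90a
// instructions, AGPRs are allocated after the VGPRs, which are first rounded
// up to a multiple of 4:
//   getTotalNumVGPRs(true, /*ArgNumAGPR=*/8, /*ArgNumVGPR=*/10)
//     == alignTo(10, 4) + 8 == 20
//   getTotalNumVGPRs(false, 8, 10) == std::max(10, 8) == 10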
2590
2591bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
2592 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2593 const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2594 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2595 Reg == AMDGPU::SCC;
2596}
2597
2598bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
2599 return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
2600}
2601
2602#define MAP_REG2REG \
2603 using namespace AMDGPU; \
2604 switch (Reg.id()) { \
2605 default: \
2606 return Reg; \
2607 CASE_CI_VI(FLAT_SCR) \
2608 CASE_CI_VI(FLAT_SCR_LO) \
2609 CASE_CI_VI(FLAT_SCR_HI) \
2610 CASE_VI_GFX9PLUS(TTMP0) \
2611 CASE_VI_GFX9PLUS(TTMP1) \
2612 CASE_VI_GFX9PLUS(TTMP2) \
2613 CASE_VI_GFX9PLUS(TTMP3) \
2614 CASE_VI_GFX9PLUS(TTMP4) \
2615 CASE_VI_GFX9PLUS(TTMP5) \
2616 CASE_VI_GFX9PLUS(TTMP6) \
2617 CASE_VI_GFX9PLUS(TTMP7) \
2618 CASE_VI_GFX9PLUS(TTMP8) \
2619 CASE_VI_GFX9PLUS(TTMP9) \
2620 CASE_VI_GFX9PLUS(TTMP10) \
2621 CASE_VI_GFX9PLUS(TTMP11) \
2622 CASE_VI_GFX9PLUS(TTMP12) \
2623 CASE_VI_GFX9PLUS(TTMP13) \
2624 CASE_VI_GFX9PLUS(TTMP14) \
2625 CASE_VI_GFX9PLUS(TTMP15) \
2626 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2627 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2628 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2629 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2630 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2631 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2632 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2633 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2634 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2635 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2636 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2637 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2638 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2639 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2640 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2641 CASE_VI_GFX9PLUS( \
2642 TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2643 CASE_GFXPRE11_GFX11PLUS(M0) \
2644 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2645 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2646 }
2647
2648#define CASE_CI_VI(node) \
2649 assert(!isSI(STI)); \
2650 case node: \
2651 return isCI(STI) ? node##_ci : node##_vi;
2652
2653#define CASE_VI_GFX9PLUS(node) \
2654 case node: \
2655 return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2656
2657#define CASE_GFXPRE11_GFX11PLUS(node) \
2658 case node: \
2659 return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2660
2661#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2662 case node: \
2663 return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2664
2665MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
2666 if (STI.getTargetTriple().getArch() == Triple::r600)
2667 return Reg;
2668 MAP_REG2REG
2669}
2670
2671#undef CASE_CI_VI
2672#undef CASE_VI_GFX9PLUS
2673#undef CASE_GFXPRE11_GFX11PLUS
2674#undef CASE_GFXPRE11_GFX11PLUS_TO
2675
2676#define CASE_CI_VI(node) \
2677 case node##_ci: \
2678 case node##_vi: \
2679 return node;
2680#define CASE_VI_GFX9PLUS(node) \
2681 case node##_vi: \
2682 case node##_gfx9plus: \
2683 return node;
2684#define CASE_GFXPRE11_GFX11PLUS(node) \
2685 case node##_gfx11plus: \
2686 case node##_gfxpre11: \
2687 return node;
2688#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2689
2690MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
2691
2692bool isInlineValue(unsigned Reg) {
2693 switch (Reg) {
2694 case AMDGPU::SRC_SHARED_BASE_LO:
2695 case AMDGPU::SRC_SHARED_BASE:
2696 case AMDGPU::SRC_SHARED_LIMIT_LO:
2697 case AMDGPU::SRC_SHARED_LIMIT:
2698 case AMDGPU::SRC_PRIVATE_BASE_LO:
2699 case AMDGPU::SRC_PRIVATE_BASE:
2700 case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2701 case AMDGPU::SRC_PRIVATE_LIMIT:
2702 case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
2703 case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
2704 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2705 return true;
2706 case AMDGPU::SRC_VCCZ:
2707 case AMDGPU::SRC_EXECZ:
2708 case AMDGPU::SRC_SCC:
2709 return true;
2710 case AMDGPU::SGPR_NULL:
2711 return true;
2712 default:
2713 return false;
2714 }
2715}
2716
2717#undef CASE_CI_VI
2718#undef CASE_VI_GFX9PLUS
2719#undef CASE_GFXPRE11_GFX11PLUS
2720#undef CASE_GFXPRE11_GFX11PLUS_TO
2721#undef MAP_REG2REG
2722
2723bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2724 assert(OpNo < Desc.NumOperands);
2725 unsigned OpType = Desc.operands()[OpNo].OperandType;
2726 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2727 OpType <= AMDGPU::OPERAND_SRC_LAST;
2728}
2729
2730bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2731 assert(OpNo < Desc.NumOperands);
2732 unsigned OpType = Desc.operands()[OpNo].OperandType;
2733 return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2734 OpType <= AMDGPU::OPERAND_KIMM_LAST;
2735}
2736
2737bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2738 assert(OpNo < Desc.NumOperands);
2739 unsigned OpType = Desc.operands()[OpNo].OperandType;
2740 switch (OpType) {
2753 return true;
2754 default:
2755 return false;
2756 }
2757}
2758
2759bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2760 assert(OpNo < Desc.NumOperands);
2761 unsigned OpType = Desc.operands()[OpNo].OperandType;
2762 return (OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2763 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST) ||
2764 (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2765 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST);
2766}
2767
2768// Avoid using MCRegisterClass::getSize, since that function will go away
2769// (move from MC* level to Target* level). Return size in bits.
2770unsigned getRegBitWidth(unsigned RCID) {
2771 switch (RCID) {
2772 case AMDGPU::VGPR_16RegClassID:
2773 case AMDGPU::VGPR_16_Lo128RegClassID:
2774 case AMDGPU::SGPR_LO16RegClassID:
2775 case AMDGPU::AGPR_LO16RegClassID:
2776 return 16;
2777 case AMDGPU::SGPR_32RegClassID:
2778 case AMDGPU::VGPR_32RegClassID:
2779 case AMDGPU::VRegOrLds_32RegClassID:
2780 case AMDGPU::AGPR_32RegClassID:
2781 case AMDGPU::VS_32RegClassID:
2782 case AMDGPU::AV_32RegClassID:
2783 case AMDGPU::SReg_32RegClassID:
2784 case AMDGPU::SReg_32_XM0RegClassID:
2785 case AMDGPU::SRegOrLds_32RegClassID:
2786 return 32;
2787 case AMDGPU::SGPR_64RegClassID:
2788 case AMDGPU::VS_64RegClassID:
2789 case AMDGPU::SReg_64RegClassID:
2790 case AMDGPU::VReg_64RegClassID:
2791 case AMDGPU::AReg_64RegClassID:
2792 case AMDGPU::SReg_64_XEXECRegClassID:
2793 case AMDGPU::VReg_64_Align2RegClassID:
2794 case AMDGPU::AReg_64_Align2RegClassID:
2795 case AMDGPU::AV_64RegClassID:
2796 case AMDGPU::AV_64_Align2RegClassID:
2797 return 64;
2798 case AMDGPU::SGPR_96RegClassID:
2799 case AMDGPU::SReg_96RegClassID:
2800 case AMDGPU::VReg_96RegClassID:
2801 case AMDGPU::AReg_96RegClassID:
2802 case AMDGPU::VReg_96_Align2RegClassID:
2803 case AMDGPU::AReg_96_Align2RegClassID:
2804 case AMDGPU::AV_96RegClassID:
2805 case AMDGPU::AV_96_Align2RegClassID:
2806 return 96;
2807 case AMDGPU::SGPR_128RegClassID:
2808 case AMDGPU::SReg_128RegClassID:
2809 case AMDGPU::VReg_128RegClassID:
2810 case AMDGPU::AReg_128RegClassID:
2811 case AMDGPU::VReg_128_Align2RegClassID:
2812 case AMDGPU::AReg_128_Align2RegClassID:
2813 case AMDGPU::AV_128RegClassID:
2814 case AMDGPU::AV_128_Align2RegClassID:
2815 case AMDGPU::SReg_128_XNULLRegClassID:
2816 return 128;
2817 case AMDGPU::SGPR_160RegClassID:
2818 case AMDGPU::SReg_160RegClassID:
2819 case AMDGPU::VReg_160RegClassID:
2820 case AMDGPU::AReg_160RegClassID:
2821 case AMDGPU::VReg_160_Align2RegClassID:
2822 case AMDGPU::AReg_160_Align2RegClassID:
2823 case AMDGPU::AV_160RegClassID:
2824 case AMDGPU::AV_160_Align2RegClassID:
2825 return 160;
2826 case AMDGPU::SGPR_192RegClassID:
2827 case AMDGPU::SReg_192RegClassID:
2828 case AMDGPU::VReg_192RegClassID:
2829 case AMDGPU::AReg_192RegClassID:
2830 case AMDGPU::VReg_192_Align2RegClassID:
2831 case AMDGPU::AReg_192_Align2RegClassID:
2832 case AMDGPU::AV_192RegClassID:
2833 case AMDGPU::AV_192_Align2RegClassID:
2834 return 192;
2835 case AMDGPU::SGPR_224RegClassID:
2836 case AMDGPU::SReg_224RegClassID:
2837 case AMDGPU::VReg_224RegClassID:
2838 case AMDGPU::AReg_224RegClassID:
2839 case AMDGPU::VReg_224_Align2RegClassID:
2840 case AMDGPU::AReg_224_Align2RegClassID:
2841 case AMDGPU::AV_224RegClassID:
2842 case AMDGPU::AV_224_Align2RegClassID:
2843 return 224;
2844 case AMDGPU::SGPR_256RegClassID:
2845 case AMDGPU::SReg_256RegClassID:
2846 case AMDGPU::VReg_256RegClassID:
2847 case AMDGPU::AReg_256RegClassID:
2848 case AMDGPU::VReg_256_Align2RegClassID:
2849 case AMDGPU::AReg_256_Align2RegClassID:
2850 case AMDGPU::AV_256RegClassID:
2851 case AMDGPU::AV_256_Align2RegClassID:
2852 case AMDGPU::SReg_256_XNULLRegClassID:
2853 return 256;
2854 case AMDGPU::SGPR_288RegClassID:
2855 case AMDGPU::SReg_288RegClassID:
2856 case AMDGPU::VReg_288RegClassID:
2857 case AMDGPU::AReg_288RegClassID:
2858 case AMDGPU::VReg_288_Align2RegClassID:
2859 case AMDGPU::AReg_288_Align2RegClassID:
2860 case AMDGPU::AV_288RegClassID:
2861 case AMDGPU::AV_288_Align2RegClassID:
2862 return 288;
2863 case AMDGPU::SGPR_320RegClassID:
2864 case AMDGPU::SReg_320RegClassID:
2865 case AMDGPU::VReg_320RegClassID:
2866 case AMDGPU::AReg_320RegClassID:
2867 case AMDGPU::VReg_320_Align2RegClassID:
2868 case AMDGPU::AReg_320_Align2RegClassID:
2869 case AMDGPU::AV_320RegClassID:
2870 case AMDGPU::AV_320_Align2RegClassID:
2871 return 320;
2872 case AMDGPU::SGPR_352RegClassID:
2873 case AMDGPU::SReg_352RegClassID:
2874 case AMDGPU::VReg_352RegClassID:
2875 case AMDGPU::AReg_352RegClassID:
2876 case AMDGPU::VReg_352_Align2RegClassID:
2877 case AMDGPU::AReg_352_Align2RegClassID:
2878 case AMDGPU::AV_352RegClassID:
2879 case AMDGPU::AV_352_Align2RegClassID:
2880 return 352;
2881 case AMDGPU::SGPR_384RegClassID:
2882 case AMDGPU::SReg_384RegClassID:
2883 case AMDGPU::VReg_384RegClassID:
2884 case AMDGPU::AReg_384RegClassID:
2885 case AMDGPU::VReg_384_Align2RegClassID:
2886 case AMDGPU::AReg_384_Align2RegClassID:
2887 case AMDGPU::AV_384RegClassID:
2888 case AMDGPU::AV_384_Align2RegClassID:
2889 return 384;
2890 case AMDGPU::SGPR_512RegClassID:
2891 case AMDGPU::SReg_512RegClassID:
2892 case AMDGPU::VReg_512RegClassID:
2893 case AMDGPU::AReg_512RegClassID:
2894 case AMDGPU::VReg_512_Align2RegClassID:
2895 case AMDGPU::AReg_512_Align2RegClassID:
2896 case AMDGPU::AV_512RegClassID:
2897 case AMDGPU::AV_512_Align2RegClassID:
2898 return 512;
2899 case AMDGPU::SGPR_1024RegClassID:
2900 case AMDGPU::SReg_1024RegClassID:
2901 case AMDGPU::VReg_1024RegClassID:
2902 case AMDGPU::AReg_1024RegClassID:
2903 case AMDGPU::VReg_1024_Align2RegClassID:
2904 case AMDGPU::AReg_1024_Align2RegClassID:
2905 case AMDGPU::AV_1024RegClassID:
2906 case AMDGPU::AV_1024_Align2RegClassID:
2907 return 1024;
2908 default:
2909 llvm_unreachable("Unexpected register class");
2910 }
2911}
2912
2913unsigned getRegBitWidth(const MCRegisterClass &RC) {
2914 return getRegBitWidth(RC.getID());
2915}
2916
2917unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
2918 unsigned OpNo) {
2919 assert(OpNo < Desc.NumOperands);
2920 unsigned RCID = Desc.operands()[OpNo].RegClass;
2921 return getRegBitWidth(RCID) / 8;
2922}
2923
2924bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2925 if (isInlinableIntLiteral(Literal))
2926 return true;
2927
2928 uint64_t Val = static_cast<uint64_t>(Literal);
2929 return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
2930 (Val == llvm::bit_cast<uint64_t>(1.0)) ||
2931 (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
2932 (Val == llvm::bit_cast<uint64_t>(0.5)) ||
2933 (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
2934 (Val == llvm::bit_cast<uint64_t>(2.0)) ||
2935 (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
2936 (Val == llvm::bit_cast<uint64_t>(4.0)) ||
2937 (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
2938 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2939}
2940
2941bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2942 if (isInlinableIntLiteral(Literal))
2943 return true;
2944
2945 // The actual type of the operand does not seem to matter as long
2946 // as the bits match one of the inline immediate values. For example:
2947 //
2948 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2949 // so it is a legal inline immediate.
2950 //
2951 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2952 // floating-point, so it is a legal inline immediate.
2953
2954 uint32_t Val = static_cast<uint32_t>(Literal);
2955 return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
2956 (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
2957 (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
2958 (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
2959 (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
2960 (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
2961 (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
2962 (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
2963 (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
2964 (Val == 0x3e22f983 && HasInv2Pi);
2965}
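// Illustrative sketch (editorial addition, not in the upstream file): only
// the bit pattern is inspected, as the comment above explains:
//   isInlinableLiteral32(0x3F800000, true); // true, the bits of 1.0f
//   isInlinableLiteral32(64, true);         // true, inline integer range
//   isInlinableLiteral32(65, true);         // false, no matching encoding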
2966
2967bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi) {
2968 if (!HasInv2Pi)
2969 return false;
2970 if (isInlinableIntLiteral(Literal))
2971 return true;
2972 uint16_t Val = static_cast<uint16_t>(Literal);
2973 return Val == 0x3F00 || // 0.5
2974 Val == 0xBF00 || // -0.5
2975 Val == 0x3F80 || // 1.0
2976 Val == 0xBF80 || // -1.0
2977 Val == 0x4000 || // 2.0
2978 Val == 0xC000 || // -2.0
2979 Val == 0x4080 || // 4.0
2980 Val == 0xC080 || // -4.0
2981 Val == 0x3E22; // 1.0 / (2.0 * pi)
2982}
2983
2984bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi) {
2985 return isInlinableLiteral32(Literal, HasInv2Pi);
2986}
2987
2988bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi) {
2989 if (!HasInv2Pi)
2990 return false;
2991 if (isInlinableIntLiteral(Literal))
2992 return true;
2993 uint16_t Val = static_cast<uint16_t>(Literal);
2994 return Val == 0x3C00 || // 1.0
2995 Val == 0xBC00 || // -1.0
2996 Val == 0x3800 || // 0.5
2997 Val == 0xB800 || // -0.5
2998 Val == 0x4000 || // 2.0
2999 Val == 0xC000 || // -2.0
3000 Val == 0x4400 || // 4.0
3001 Val == 0xC400 || // -4.0
3002 Val == 0x3118; // 1/2pi
3003}
3004
3005std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
3006 // Unfortunately, the Instruction Set Architecture Reference Guide is
3007 // misleading about how the inline operands work for (packed) 16-bit
3008 // instructions. In a nutshell, the actual HW behavior is:
3009 //
3010 // - integer encodings (-16 .. 64) are always produced as sign-extended
3011 // 32-bit values
3012 // - float encodings are produced as:
3013 // - for F16 instructions: corresponding half-precision float values in
3014 // the LSBs, 0 in the MSBs
3015 // - for UI16 instructions: corresponding single-precision float value
3016 int32_t Signed = static_cast<int32_t>(Literal);
3017 if (Signed >= 0 && Signed <= 64)
3018 return 128 + Signed;
3019
3020 if (Signed >= -16 && Signed <= -1)
3021 return 192 + std::abs(Signed);
3022
3023 if (IsFloat) {
3024 // clang-format off
3025 switch (Literal) {
3026 case 0x3800: return 240; // 0.5
3027 case 0xB800: return 241; // -0.5
3028 case 0x3C00: return 242; // 1.0
3029 case 0xBC00: return 243; // -1.0
3030 case 0x4000: return 244; // 2.0
3031 case 0xC000: return 245; // -2.0
3032 case 0x4400: return 246; // 4.0
3033 case 0xC400: return 247; // -4.0
3034 case 0x3118: return 248; // 1.0 / (2.0 * pi)
3035 default: break;
3036 }
3037 // clang-format on
3038 } else {
3039 // clang-format off
3040 switch (Literal) {
3041 case 0x3F000000: return 240; // 0.5
3042 case 0xBF000000: return 241; // -0.5
3043 case 0x3F800000: return 242; // 1.0
3044 case 0xBF800000: return 243; // -1.0
3045 case 0x40000000: return 244; // 2.0
3046 case 0xC0000000: return 245; // -2.0
3047 case 0x40800000: return 246; // 4.0
3048 case 0xC0800000: return 247; // -4.0
3049 case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
3050 default: break;
3051 }
3052 // clang-format on
3053 }
3054
3055 return {};
3056}
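// Illustrative sketch (editorial addition, not in the upstream file):
//   getInlineEncodingV216(true, 0x3C00); // 242, f16 1.0 in the low half
//   getInlineEncodingV216(true, 5);      // 133, i.e. 128 + 5
//   getInlineEncodingV216(true, 65);     // std::nullopt, not inlinable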
3057
3058// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
3059// or nullopt.
3060std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
3061 return getInlineEncodingV216(false, Literal);
3062}
3063
3064// Encoding of the literal as an inline constant for a V_PK_*_BF16 instruction
3065// or nullopt.
3066std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal) {
3067 int32_t Signed = static_cast<int32_t>(Literal);
3068 if (Signed >= 0 && Signed <= 64)
3069 return 128 + Signed;
3070
3071 if (Signed >= -16 && Signed <= -1)
3072 return 192 + std::abs(Signed);
3073
3074 // clang-format off
3075 switch (Literal) {
3076 case 0x3F00: return 240; // 0.5
3077 case 0xBF00: return 241; // -0.5
3078 case 0x3F80: return 242; // 1.0
3079 case 0xBF80: return 243; // -1.0
3080 case 0x4000: return 244; // 2.0
3081 case 0xC000: return 245; // -2.0
3082 case 0x4080: return 246; // 4.0
3083 case 0xC080: return 247; // -4.0
3084 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
3085 default: break;
3086 }
3087 // clang-format on
3088
3089 return std::nullopt;
3090}
3091
3092// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
3093// or nullopt.
3094std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
3095 return getInlineEncodingV216(true, Literal);
3096}
3097
3098// Whether the given literal can be inlined for a V_PK_* instruction.
3099bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
3100 switch (OpType) {
3101 case AMDGPU::OPERAND_REG_IMM_V2INT16:
3102 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3103 return getInlineEncodingV216(false, Literal).has_value();
3104 case AMDGPU::OPERAND_REG_IMM_V2FP16:
3105 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3106 return getInlineEncodingV216(true, Literal).has_value();
3107 case AMDGPU::OPERAND_REG_IMM_V2INT32:
3108 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
3109 case AMDGPU::OPERAND_REG_IMM_V2FP32:
3110 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
3111 return false;
3112 default:
3113 llvm_unreachable("bad packed operand type");
3114 }
3115}
3116
3117// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
3119 return getInlineEncodingV2I16(Literal).has_value();
3120}
3121
3122// Whether the given literal can be inlined for a V_PK_*_BF16 instruction.
3123bool isInlinableLiteralV2BF16(uint32_t Literal) {
3124 return getInlineEncodingV2BF16(Literal).has_value();
3125}
3126
3127// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
3128bool isInlinableLiteralV2F16(uint32_t Literal) {
3129 return getInlineEncodingV2F16(Literal).has_value();
3130}
3131
3132bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {
3133 if (IsFP64)
3134 return !Lo_32(Val);
3135
3136 return isUInt<32>(Val) || isInt<32>(Val);
3137}
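// Worked example (editorial addition, not in the upstream file): a 64-bit FP
// operand can only carry a 32-bit literal in its high half, so the low 32
// bits must be zero:
//   isValid32BitLiteral(0x3FF0000000000000ULL, true); // true, 1.0
//   isValid32BitLiteral(0x3FF199999999999AULL, true); // false, 1.1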
3138
3139bool isArgPassedInSGPR(const Argument *A) {
3140 const Function *F = A->getParent();
3141
3142 // Arguments to compute shaders are never a source of divergence.
3143 CallingConv::ID CC = F->getCallingConv();
3144 switch (CC) {
3145 case CallingConv::AMDGPU_KERNEL:
3146 case CallingConv::SPIR_KERNEL:
3147 return true;
3148 case CallingConv::AMDGPU_VS:
3149 case CallingConv::AMDGPU_LS:
3150 case CallingConv::AMDGPU_HS:
3151 case CallingConv::AMDGPU_ES:
3152 case CallingConv::AMDGPU_GS:
3153 case CallingConv::AMDGPU_PS:
3154 case CallingConv::AMDGPU_CS:
3155 case CallingConv::AMDGPU_Gfx:
3156 case CallingConv::AMDGPU_CS_Chain:
3157 case CallingConv::AMDGPU_CS_ChainPreserve:
3158 // For non-compute shaders, SGPR inputs are marked with either inreg or
3159 // byval. Everything else is in VGPRs.
3160 return A->hasAttribute(Attribute::InReg) ||
3161 A->hasAttribute(Attribute::ByVal);
3162 default:
3163 // TODO: treat i1 as divergent?
3164 return A->hasAttribute(Attribute::InReg);
3165 }
3166}
3167
3168bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
3169 // Arguments to compute shaders are never a source of divergence.
3170 CallingConv::ID CC = CB->getCallingConv();
3171 switch (CC) {
3172 case CallingConv::AMDGPU_KERNEL:
3173 case CallingConv::SPIR_KERNEL:
3174 return true;
3175 case CallingConv::AMDGPU_VS:
3176 case CallingConv::AMDGPU_LS:
3177 case CallingConv::AMDGPU_HS:
3178 case CallingConv::AMDGPU_ES:
3179 case CallingConv::AMDGPU_GS:
3180 case CallingConv::AMDGPU_PS:
3181 case CallingConv::AMDGPU_CS:
3182 case CallingConv::AMDGPU_Gfx:
3183 case CallingConv::AMDGPU_CS_Chain:
3184 case CallingConv::AMDGPU_CS_ChainPreserve:
3185 // For non-compute shaders, SGPR inputs are marked with either inreg or
3186 // byval. Everything else is in VGPRs.
3187 return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
3188 CB->paramHasAttr(ArgNo, Attribute::ByVal);
3189 default:
3190 return CB->paramHasAttr(ArgNo, Attribute::InReg);
3191 }
3192}
3193
3194static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
3195 return isGCN3Encoding(ST) || isGFX10Plus(ST);
3196}
3197
3198bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
3199 int64_t EncodedOffset) {
3200 if (isGFX12Plus(ST))
3201 return isUInt<23>(EncodedOffset);
3202
3203 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
3204 : isUInt<8>(EncodedOffset);
3205}
3206
3207bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
3208 int64_t EncodedOffset, bool IsBuffer) {
3209 if (isGFX12Plus(ST))
3210 return isInt<24>(EncodedOffset);
3211
3212 return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
3213}
3214
3215static bool isDwordAligned(uint64_t ByteOffset) {
3216 return (ByteOffset & 3) == 0;
3217}
3218
3219uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
3220 uint64_t ByteOffset) {
3221 if (hasSMEMByteOffset(ST))
3222 return ByteOffset;
3223
3224 assert(isDwordAligned(ByteOffset));
3225 return ByteOffset >> 2;
3226}
3227
3228std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
3229 int64_t ByteOffset, bool IsBuffer,
3230 bool HasSOffset) {
3231 // For unbuffered smem loads, it is illegal for the immediate offset to be
3232 // negative if the resulting (Offset + (M0 or SOffset or zero)) is negative.
3233 // Handle the case where SOffset is not present.
3234 if (!IsBuffer && !HasSOffset && ByteOffset < 0 && hasSMRDSignedImmOffset(ST))
3235 return std::nullopt;
3236
3237 if (isGFX12Plus(ST)) // 24 bit signed offsets
3238 return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3239 : std::nullopt;
3240
3241 // The signed version is always a byte offset.
3242 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
3243 assert(hasSMEMByteOffset(ST));
3244 return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3245 : std::nullopt;
3246 }
3247
3248 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
3249 return std::nullopt;
3250
3251 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3252 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
3253 ? std::optional<int64_t>(EncodedOffset)
3254 : std::nullopt;
3255}
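// Illustrative sketch (editorial addition, not in the upstream file): on a
// target without byte SMEM offsets (e.g. SI), byte offsets are converted to
// dwords and must be dword-aligned:
//   getSMRDEncodedOffset(ST, 16, false, false); // 4 (dwords) on SI
//   getSMRDEncodedOffset(ST, 17, false, false); // std::nullopt on SI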
3256
3257std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
3258 int64_t ByteOffset) {
3259 if (!isCI(ST) || !isDwordAligned(ByteOffset))
3260 return std::nullopt;
3261
3262 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3263 return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
3264 : std::nullopt;
3265}
3266
3267unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
3268 if (AMDGPU::isGFX10(ST))
3269 return 12;
3270
3271 if (AMDGPU::isGFX12(ST))
3272 return 24;
3273 return 13;
3274}
3275
3276namespace {
3277
3278struct SourceOfDivergence {
3279 unsigned Intr;
3280};
3281const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
3282
3283struct AlwaysUniform {
3284 unsigned Intr;
3285};
3286const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
3287
3288#define GET_SourcesOfDivergence_IMPL
3289#define GET_UniformIntrinsics_IMPL
3290#define GET_Gfx9BufferFormat_IMPL
3291#define GET_Gfx10BufferFormat_IMPL
3292#define GET_Gfx11PlusBufferFormat_IMPL
3293
3294#include "AMDGPUGenSearchableTables.inc"
3295
3296} // end anonymous namespace
3297
3298bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
3299 return lookupSourceOfDivergence(IntrID);
3300}
3301
3302bool isIntrinsicAlwaysUniform(unsigned IntrID) {
3303 return lookupAlwaysUniform(IntrID);
3304}
3305
3306const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
3307 uint8_t NumComponents,
3308 uint8_t NumFormat,
3309 const MCSubtargetInfo &STI) {
3310 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
3311 BitsPerComp, NumComponents, NumFormat)
3312 : isGFX10(STI)
3313 ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
3314 : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
3315}
3316
3317const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
3318 const MCSubtargetInfo &STI) {
3319 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
3320 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
3321 : getGfx9BufferFormatInfo(Format);
3322}
3323
3324bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) {
3325 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3326
3327 if (TSFlags & SIInstrFlags::SMRD)
3328 return !getSMEMIsBuffer(Opcode);
3329 if (!(TSFlags & SIInstrFlags::FLAT))
3330 return false;
3331
3332 // Only SV and SVS modes are supported.
3333 if (TSFlags & SIInstrFlags::FlatScratch)
3334 return hasNamedOperand(Opcode, OpName::vaddr);
3335
3336 // Only GVS mode is supported.
3337 return hasNamedOperand(Opcode, OpName::vaddr) &&
3338 hasNamedOperand(Opcode, OpName::saddr);
3341}
3342
3343static bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc) {
3344 for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
3345 int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
3346 if (Idx == -1)
3347 continue;
3348
3349 if (OpDesc.operands()[Idx].RegClass == AMDGPU::VReg_64RegClassID ||
3350 OpDesc.operands()[Idx].RegClass == AMDGPU::VReg_64_Align2RegClassID)
3351 return true;
3352 }
3353
3354 return false;
3355}
3356
3357bool isDPALU_DPP32BitOpc(unsigned Opc) {
3358 switch (Opc) {
3359 case AMDGPU::V_MUL_LO_U32_e64:
3360 case AMDGPU::V_MUL_LO_U32_e64_dpp:
3361 case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
3362 case AMDGPU::V_MUL_HI_U32_e64:
3363 case AMDGPU::V_MUL_HI_U32_e64_dpp:
3364 case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
3365 case AMDGPU::V_MUL_HI_I32_e64:
3366 case AMDGPU::V_MUL_HI_I32_e64_dpp:
3367 case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
3368 case AMDGPU::V_MAD_U32_e64:
3369 case AMDGPU::V_MAD_U32_e64_dpp:
3370 case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:
3371 return true;
3372 default:
3373 return false;
3374 }
3375}
3376
3377bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCSubtargetInfo &ST) {
3378 if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
3379 return false;
3380
3381 if (isDPALU_DPP32BitOpc(OpDesc.getOpcode()))
3382 return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);
3383
3384 return hasAny64BitVGPROperands(OpDesc);
3385}
3386
3387unsigned getLdsDwGranularity(const MCSubtargetInfo &ST) {
3388 return ST.hasFeature(AMDGPU::FeatureAddressableLocalMemorySize327680) ? 256
3389 : 128;
3390}
3391
3392bool isPackedFP32Inst(unsigned Opc) {
3393 switch (Opc) {
3394 case AMDGPU::V_PK_ADD_F32:
3395 case AMDGPU::V_PK_ADD_F32_gfx12:
3396 case AMDGPU::V_PK_MUL_F32:
3397 case AMDGPU::V_PK_MUL_F32_gfx12:
3398 case AMDGPU::V_PK_FMA_F32:
3399 case AMDGPU::V_PK_FMA_F32_gfx12:
3400 return true;
3401 default:
3402 return false;
3403 }
3404}
3405
3406} // namespace AMDGPU
3407
3408raw_ostream &operator<<(raw_ostream &OS,
3409 const AMDGPU::IsaInfo::TargetIDSetting S) {
3410 switch (S) {
3411 case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
3412 OS << "Unsupported";
3413 break;
3414 case (AMDGPU::IsaInfo::TargetIDSetting::Any):
3415 OS << "Any";
3416 break;
3417 case (AMDGPU::IsaInfo::TargetIDSetting::Off):
3418 OS << "Off";
3419 break;
3420 case (AMDGPU::IsaInfo::TargetIDSetting::On):
3421 OS << "On";
3422 break;
3423 }
3424 return OS;
3425}
3426
3427} // namespace llvm
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
#define MAP_REG2REG
unsigned Intr
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::string Name
uint32_t Index
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
#define RegName(no)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Register Reg
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
uint64_t High
#define S_00B848_MEM_ORDERED(x)
Definition: SIDefines.h:1213
#define S_00B848_WGP_MODE(x)
Definition: SIDefines.h:1210
#define S_00B848_FWD_PROGRESS(x)
Definition: SIDefines.h:1216
unsigned unsigned DefaultVal
raw_pwrite_stream & OS
This file contains some functions that are useful when dealing with strings.
static const int BlockSize
Definition: TarWriter.cpp:33
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
void setTargetIDFromTargetIDStream(StringRef TargetID)
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfDstInParsedOperands() const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
unsigned getCompParsedSrcOperandsNum() const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< unsigned(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< unsigned, Component::MAX_OPR_NUM > RegIndices
Definition: Any.h:28
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1406
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
This class represents an Operation in the Expression.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
Definition: GlobalValue.h:207
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:199
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:238
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:240
bool mayStore() const
Return true if this instruction could possibly modify memory.
Definition: MCInstrDesc.h:446
bool mayLoad() const
Return true if this instruction could possibly read memory.
Definition: MCInstrDesc.h:440
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:249
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:220
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:231
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:64
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
StringRef getCPU() const
Metadata node.
Definition: Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1451
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
Definition: StringRef.h:862
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:710
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:480
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:233
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:151
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:154
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:281
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:417
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:408
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition: Triple.h:901
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:662
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned decodeFieldHoldCnt(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule...
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
StringLiteral const UfmtSymbolicGFX11[]
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX10[]
StringLiteral const DfmtSymbolic[]
static StringLiteral const * getNfmtLookupTable(const MCSubtargetInfo &STI)
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringLiteral const NfmtSymbolicGFX10[]
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX11[]
StringLiteral const NfmtSymbolicVI[]
StringLiteral const NfmtSymbolicSICI[]
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
int64_t getDfmt(const StringRef Name)
StringLiteral const UfmtSymbolicGFX10[]
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, unsigned OpNo)
Get the size of a register operand.
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from the given Waitcnt for the given isa Version, and writes the decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Returns true if Reg is a scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
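A sketch of that conversion, with a hypothetical UsesByteOffsets flag standing in for the subtarget query (cf. hasSMEMByteOffset):
// Sketch: convert a byte offset into the units the SMRD encoding expects.
uint64_t toSMRDUnits(bool UsesByteOffsets, uint64_t ByteOffset) {
  return UsesByteOffsets ? ByteOffset : ByteOffset >> 2; // bytes -> dwords
}
// On dword-offset subtargets the byte offset must also be dword-aligned
// before this is meaningful (cf. isDwordAligned).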
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo register, return the correct hardware register for the given STI; otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; the MSB is ignored and forced to zero.
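Given the bit count N from getNumFlatOffsetBits, an encodability check follows directly; pre-GFX12 only the non-negative half of the N-bit range is usable since the sign bit is forced to zero. A simplified sketch (hypothetical helper; the real rules also depend on the address space):
// Sketch: can Offset be encoded in an N-bit FLAT offset field?
bool isLegalFlatOffsetSketch(int64_t Offset, unsigned N, bool IsSigned) {
  int64_t Max = (int64_t(1) << (N - 1)) - 1; // largest encodable value
  return IsSigned ? (Offset >= -(Max + 1) && Offset <= Max)  // GFX12+ style
                  : (Offset >= 0 && Offset <= Max);          // pre-GFX12 style
}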
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks whether Val lies inside MD, a !range-like metadata node.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
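A usage sketch built only from entries in this file: the immediate for a full wait (all legacy counters drained to zero), assuming an MCSubtargetInfo STI is in scope:
// Usage sketch: encode "wait until vmcnt, expcnt and lgkmcnt are all 0".
IsaVersion Version = getIsaVersion(STI.getCPU());
unsigned WaitAll = encodeWaitcnt(Version, /*Vmcnt=*/0, /*Expcnt=*/0,
                                 /*Lgkmcnt=*/0);
unsigned Vm = decodeVmcnt(Version, WaitAll); // round-trips to 0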
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this an AMDGPU specific source operand? These include registers, inline constants, literals, and mandatory literals (KImm).
unsigned getKmcntBitMask(const IsaVersion &Version)
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, excluding the values intended for floating-point operands?
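On AMDGPU the integer inline constants are exactly the values -16 through 64, so the predicate amounts to a range check (helper name hypothetical):
// Integer inline constants cover -16..64 inclusive.
bool isInlinableIntLiteralSketch(int64_t Literal) {
  return Literal >= -16 && Literal <= 64;
}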
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCSubtargetInfo &ST)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isInlineValue(unsigned Reg)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid character code in the first entry if this is a valid physical register constraint, or 0 otherwise.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
@ OPERAND_KIMM_LAST
Definition: SIDefines.h:263
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition: SIDefines.h:231
@ OPERAND_REG_INLINE_C_LAST
Definition: SIDefines.h:254
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:209
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:222
@ OPERAND_REG_INLINE_C_V2BF16
Definition: SIDefines.h:224
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:210
@ OPERAND_SRC_FIRST
Definition: SIDefines.h:259
@ OPERAND_REG_IMM_V2BF16
Definition: SIDefines.h:208
@ OPERAND_REG_INLINE_AC_FIRST
Definition: SIDefines.h:256
@ OPERAND_KIMM_FIRST
Definition: SIDefines.h:262
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:207
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition: SIDefines.h:211
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:205
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:225
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:237
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:204
@ OPERAND_REG_INLINE_C_FIRST
Definition: SIDefines.h:253
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:221
@ OPERAND_REG_INLINE_AC_LAST
Definition: SIDefines.h:257
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:223
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:213
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:238
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:220
@ OPERAND_SRC_LAST
Definition: SIDefines.h:260
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isMAC(unsigned Opc)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
const int OPR_ID_UNKNOWN
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
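A typical use is reading a paired limit from a string function attribute whose value is "min" or "min,max"; the attribute name and default below are illustrative:
// Usage sketch: read a "min,max" attribute pair with a fallback default.
std::pair<unsigned, unsigned> WavesPerEU =
    getIntegerPairAttribute(F, "amdgpu-waves-per-eu",
                            /*Default=*/{1, 10},
                            /*OnlyFirstRequired=*/true);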
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable?
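Besides the small integers, the floating-point inline constants form a short fixed list: 0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0, and 1/(2*pi) when HasInv2Pi. A sketch for the 32-bit case, written against the raw bit patterns (the helper name is hypothetical):
// Sketch: f32 inline-constant bit patterns, ignoring the -16..64 integers.
bool isInlinableFP32Sketch(uint32_t Bits, bool HasInv2Pi) {
  switch (Bits) {
  case 0x00000000:                  // 0.0
  case 0x3F000000: case 0xBF000000: // +/-0.5
  case 0x3F800000: case 0xBF800000: // +/-1.0
  case 0x40000000: case 0xC0000000: // +/-2.0
  case 0x40800000: case 0xC0800000: // +/-4.0
    return true;
  case 0x3E22F983:                  // 1/(2*pi)
    return HasInv2Pi;
  default:
    return false;
  }
}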
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or the AMDPAL last shader stage before rasterization (the vertex shader if tessellation and geometry are not in use, otherwise the copy shader).
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AMDGPU_Gfx
Used for AMD graphics targets.
Definition: CallingConv.h:232
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ AMDGPU_ES
Used for the AMDPAL shader stage before the geometry shader, if geometry is in use.
Definition: CallingConv.h:218
@ AMDGPU_LS
Used for the AMDPAL vertex shader if tessellation is in use.
Definition: CallingConv.h:213
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition: ELF.h:384
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition: ELF.h:385
@ ELFABIVERSION_AMDGPU_HSA_V6
Definition: ELF.h:386
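getELFABIVersion maps the HSA code object version onto these values (the fallback below is an assumption; the real function also inspects the Triple):
// Sketch: code-object version -> ELF ABI version, for AMDHSA triples.
uint8_t elfABIVersionSketch(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case 4: return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5: return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  case 6: return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
  default: return ELF::ELFABIVERSION_AMDGPU_HSA_V4; // assumed fallback
  }
}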
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer that is less than or equal to Value and is congruent to Skew modulo Align.
Definition: MathExtras.h:551
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition: MathExtras.h:164
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:399
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
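The two rounding helpers are related: alignTo(Size, A) is divideCeil(Size, A) scaled back up by the alignment. A worked example:
// alignTo rounds up to the next multiple of the alignment:
//   divideCeil(10, 8) == 2, and 2 * 8 == 16.
uint64_t Padded = alignTo(10, Align(8)); // == 16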
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:312
@ AlwaysUniform
The result values are always uniform.
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
AMD Kernel Code Object (amd_kernel_code_t).
Instruction set architecture version.
Definition: TargetParser.h:132
Represents the counter values to wait for in an s_waitcnt instruction.
Description of the encoding of one expression Op.