1//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AMDGPUBaseInfo.h"
10#include "AMDGPU.h"
11#include "AMDGPUAsmUtils.h"
12#include "AMDKernelCodeT.h"
17#include "llvm/IR/Attributes.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalValue.h"
21#include "llvm/IR/IntrinsicsAMDGPU.h"
22#include "llvm/IR/IntrinsicsR600.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/Metadata.h"
25#include "llvm/MC/MCInstrInfo.h"
30#include <optional>
31
32#define GET_INSTRINFO_NAMED_OPS
33#define GET_INSTRMAP_INFO
34#include "AMDGPUGenInstrInfo.inc"
35
37 "amdhsa-code-object-version", llvm::cl::Hidden,
39 llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
41
42namespace {
43
44/// \returns Bit mask for given bit \p Shift and bit \p Width.
45unsigned getBitMask(unsigned Shift, unsigned Width) {
46 return ((1 << Width) - 1) << Shift;
47}
48
49/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
50///
51/// \returns Packed \p Dst.
52unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
53 unsigned Mask = getBitMask(Shift, Width);
54 return ((Src << Shift) & Mask) | (Dst & ~Mask);
55}
56
57/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
58///
59/// \returns Unpacked bits.
60unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
61 return (Src & getBitMask(Shift, Width)) >> Shift;
62}
63
64/// \returns Vmcnt bit shift (lower bits).
65unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
66 return VersionMajor >= 11 ? 10 : 0;
67}
68
69/// \returns Vmcnt bit width (lower bits).
70unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
71 return VersionMajor >= 11 ? 6 : 4;
72}
73
74/// \returns Expcnt bit shift.
75unsigned getExpcntBitShift(unsigned VersionMajor) {
76 return VersionMajor >= 11 ? 0 : 4;
77}
78
79/// \returns Expcnt bit width.
80unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
81
82/// \returns Lgkmcnt bit shift.
83unsigned getLgkmcntBitShift(unsigned VersionMajor) {
84 return VersionMajor >= 11 ? 4 : 8;
85}
86
87/// \returns Lgkmcnt bit width.
88unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
89 return VersionMajor >= 10 ? 6 : 4;
90}
91
92/// \returns Vmcnt bit shift (higher bits).
93unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
94
95/// \returns Vmcnt bit width (higher bits).
96unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
97 return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
98}
99
100/// \returns Loadcnt bit width
101unsigned getLoadcntBitWidth(unsigned VersionMajor) {
102 return VersionMajor >= 12 ? 6 : 0;
103}
104
105/// \returns Samplecnt bit width.
106unsigned getSamplecntBitWidth(unsigned VersionMajor) {
107 return VersionMajor >= 12 ? 6 : 0;
108}
109
110/// \returns Bvhcnt bit width.
111unsigned getBvhcntBitWidth(unsigned VersionMajor) {
112 return VersionMajor >= 12 ? 3 : 0;
113}
114
115/// \returns Dscnt bit width.
116unsigned getDscntBitWidth(unsigned VersionMajor) {
117 return VersionMajor >= 12 ? 6 : 0;
118}
119
120/// \returns Dscnt bit shift in combined S_WAIT instructions.
121unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }
122
123/// \returns Storecnt or Vscnt bit width, depending on VersionMajor.
124unsigned getStorecntBitWidth(unsigned VersionMajor) {
125 return VersionMajor >= 10 ? 6 : 0;
126}
127
128/// \returns Kmcnt bit width.
129unsigned getKmcntBitWidth(unsigned VersionMajor) {
130 return VersionMajor >= 12 ? 5 : 0;
131}
132
133/// \returns Xcnt bit width.
134unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
135 return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
136}
137
138/// \returns shift for Loadcnt/Storecnt in combined S_WAIT instructions.
139unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
140 return VersionMajor >= 12 ? 8 : 0;
141}
142
143/// \returns VaSdst bit width
144inline unsigned getVaSdstBitWidth() { return 3; }
145
146/// \returns VaSdst bit shift
147inline unsigned getVaSdstBitShift() { return 9; }
148
149/// \returns VmVsrc bit width
150inline unsigned getVmVsrcBitWidth() { return 3; }
151
152/// \returns VmVsrc bit shift
153inline unsigned getVmVsrcBitShift() { return 2; }
154
155/// \returns VaVdst bit width
156inline unsigned getVaVdstBitWidth() { return 4; }
157
158/// \returns VaVdst bit shift
159inline unsigned getVaVdstBitShift() { return 12; }
160
161/// \returns VaVcc bit width
162inline unsigned getVaVccBitWidth() { return 1; }
163
164/// \returns VaVcc bit shift
165inline unsigned getVaVccBitShift() { return 1; }
166
167/// \returns SaSdst bit width
168inline unsigned getSaSdstBitWidth() { return 1; }
169
170/// \returns SaSdst bit shift
171inline unsigned getSaSdstBitShift() { return 0; }
172
173/// \returns VaSsrc bit width
174inline unsigned getVaSsrcBitWidth() { return 1; }
175
176/// \returns VaSsrc bit shift
177inline unsigned getVaSsrcBitShift() { return 8; }
178
179/// \returns HoldCnt bit width
180inline unsigned getHoldCntWidth() { return 1; }
181
182/// \returns HoldCnt bit shift
183inline unsigned getHoldCntBitShift() { return 7; }
184
185} // end anonymous namespace
186
187namespace llvm {
188
189namespace AMDGPU {
190
191/// \returns true if the target supports signed immediate offset for SMRD
192/// instructions.
193bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
194 return isGFX9Plus(ST);
195}
196
197/// \returns True if \p STI is AMDHSA.
198bool isHsaAbi(const MCSubtargetInfo &STI) {
199 return STI.getTargetTriple().getOS() == Triple::AMDHSA;
200}
201
202unsigned getAMDHSACodeObjectVersion(const Module &M) {
203 if (auto *Ver = mdconst::extract_or_null<ConstantInt>(
204 M.getModuleFlag("amdhsa_code_object_version"))) {
205 return (unsigned)Ver->getZExtValue() / 100;
206 }
207
208 return getDefaultAMDHSACodeObjectVersion();
209}
210
211unsigned getDefaultAMDHSACodeObjectVersion() {
212 return DefaultAMDHSACodeObjectVersion;
213}
214
215unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion) {
216 switch (ABIVersion) {
217 case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
218 return 4;
219 case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
220 return 5;
221 case ELF::ELFABIVERSION_AMDGPU_HSA_V6:
222 return 6;
223 default:
224 return getDefaultAMDHSACodeObjectVersion();
225 }
226}
227
228uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion) {
229 if (T.getOS() != Triple::AMDHSA)
230 return 0;
231
232 switch (CodeObjectVersion) {
233 case 4:
234 return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
235 case 5:
236 return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
237 case 6:
238 return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
239 default:
240 report_fatal_error("Unsupported AMDHSA Code Object Version " +
241 Twine(CodeObjectVersion));
242 }
243}
244
245unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
246 switch (CodeObjectVersion) {
247 case AMDHSA_COV4:
248 return 48;
249 case AMDHSA_COV5:
250 case AMDHSA_COV6:
251 default:
252 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
253 }
254}
255
256// FIXME: All such magic numbers about the ABI should be in a
257// central TD file.
258unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
259 switch (CodeObjectVersion) {
260 case AMDHSA_COV4:
261 return 24;
262 case AMDHSA_COV5:
263 case AMDHSA_COV6:
264 default:
265 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
266 }
267}
268
269unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
270 switch (CodeObjectVersion) {
271 case AMDHSA_COV4:
272 return 32;
273 case AMDHSA_COV5:
274 case AMDHSA_COV6:
275 default:
276 return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
277 }
278}
279
280unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
281 switch (CodeObjectVersion) {
282 case AMDHSA_COV4:
283 return 40;
284 case AMDHSA_COV5:
285 case AMDHSA_COV6:
286 default:
287 return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
288 }
289}
290
291#define GET_MIMGBaseOpcodesTable_IMPL
292#define GET_MIMGDimInfoTable_IMPL
293#define GET_MIMGInfoTable_IMPL
294#define GET_MIMGLZMappingTable_IMPL
295#define GET_MIMGMIPMappingTable_IMPL
296#define GET_MIMGBiasMappingTable_IMPL
297#define GET_MIMGOffsetMappingTable_IMPL
298#define GET_MIMGG16MappingTable_IMPL
299#define GET_MAIInstInfoTable_IMPL
300#define GET_WMMAInstInfoTable_IMPL
301#include "AMDGPUGenSearchableTables.inc"
302
303int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
304 unsigned VDataDwords, unsigned VAddrDwords) {
305 const MIMGInfo *Info =
306 getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
307 return Info ? Info->Opcode : -1;
308}
309
310const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
311 const MIMGInfo *Info = getMIMGInfo(Opc);
312 return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
313}
314
315int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
316 const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
317 const MIMGInfo *NewInfo =
318 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
319 NewChannels, OrigInfo->VAddrDwords);
320 return NewInfo ? NewInfo->Opcode : -1;
321}
322
323unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
324 const MIMGDimInfo *Dim, bool IsA16,
325 bool IsG16Supported) {
326 unsigned AddrWords = BaseOpcode->NumExtraArgs;
327 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
328 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
329 if (IsA16)
330 AddrWords += divideCeil(AddrComponents, 2);
331 else
332 AddrWords += AddrComponents;
333
334 // Note: For subtargets that support A16 but not G16, enabling A16 also
335 // enables 16 bit gradients.
336 // For subtargets that support A16 (operand) and G16 (done with a different
337 // instruction encoding), they are independent.
338
339 if (BaseOpcode->Gradients) {
340 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
341 // There are two gradients per coordinate, we pack them separately.
342 // For the 3d case,
343 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
344 AddrWords += alignTo<2>(Dim->NumGradients / 2);
345 else
346 AddrWords += Dim->NumGradients;
347 }
348 return AddrWords;
349}
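// Worked example (illustrative): a 2D gradient sample (Dim->NumCoords == 2,
// Dim->NumGradients == 4) with an LOD clamp, A16 enabled and no G16 support:
// AddrComponents = 2 + 1 = 3 packs into divideCeil(3, 2) = 2 address words,
// and the 16-bit gradients add alignTo<2>(4 / 2) = 2 more, so the total is
// BaseOpcode->NumExtraArgs + 4 address dwords.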
350
361
370
375
380
384
388
392
399
407
412
413#define GET_FP4FP8DstByteSelTable_DECL
414#define GET_FP4FP8DstByteSelTable_IMPL
415
420
426
427#define GET_MTBUFInfoTable_DECL
428#define GET_MTBUFInfoTable_IMPL
429#define GET_MUBUFInfoTable_DECL
430#define GET_MUBUFInfoTable_IMPL
431#define GET_SMInfoTable_DECL
432#define GET_SMInfoTable_IMPL
433#define GET_VOP1InfoTable_DECL
434#define GET_VOP1InfoTable_IMPL
435#define GET_VOP2InfoTable_DECL
436#define GET_VOP2InfoTable_IMPL
437#define GET_VOP3InfoTable_DECL
438#define GET_VOP3InfoTable_IMPL
439#define GET_VOPC64DPPTable_DECL
440#define GET_VOPC64DPPTable_IMPL
441#define GET_VOPC64DPP8Table_DECL
442#define GET_VOPC64DPP8Table_IMPL
443#define GET_VOPCAsmOnlyInfoTable_DECL
444#define GET_VOPCAsmOnlyInfoTable_IMPL
445#define GET_VOP3CAsmOnlyInfoTable_DECL
446#define GET_VOP3CAsmOnlyInfoTable_IMPL
447#define GET_VOPDComponentTable_DECL
448#define GET_VOPDComponentTable_IMPL
449#define GET_VOPDPairs_DECL
450#define GET_VOPDPairs_IMPL
451#define GET_VOPTrue16Table_DECL
452#define GET_VOPTrue16Table_IMPL
453#define GET_True16D16Table_IMPL
454#define GET_WMMAOpcode2AddrMappingTable_DECL
455#define GET_WMMAOpcode2AddrMappingTable_IMPL
456#define GET_WMMAOpcode3AddrMappingTable_DECL
457#define GET_WMMAOpcode3AddrMappingTable_IMPL
458#define GET_getMFMA_F8F6F4_WithSize_DECL
459#define GET_getMFMA_F8F6F4_WithSize_IMPL
460#define GET_isMFMA_F8F6F4Table_IMPL
461#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
462
463#include "AMDGPUGenSearchableTables.inc"
464
465int getMTBUFBaseOpcode(unsigned Opc) {
466 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
467 return Info ? Info->BaseOpcode : -1;
468}
469
470int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
471 const MTBUFInfo *Info =
472 getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
473 return Info ? Info->Opcode : -1;
474}
475
476int getMTBUFElements(unsigned Opc) {
477 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
478 return Info ? Info->elements : 0;
479}
480
481bool getMTBUFHasVAddr(unsigned Opc) {
482 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
483 return Info && Info->has_vaddr;
484}
485
486bool getMTBUFHasSrsrc(unsigned Opc) {
487 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
488 return Info && Info->has_srsrc;
489}
490
491bool getMTBUFHasSoffset(unsigned Opc) {
492 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
493 return Info && Info->has_soffset;
494}
495
496int getMUBUFBaseOpcode(unsigned Opc) {
497 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
498 return Info ? Info->BaseOpcode : -1;
499}
500
501int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
502 const MUBUFInfo *Info =
503 getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
504 return Info ? Info->Opcode : -1;
505}
506
507int getMUBUFElements(unsigned Opc) {
508 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
509 return Info ? Info->elements : 0;
510}
511
512bool getMUBUFHasVAddr(unsigned Opc) {
513 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
514 return Info && Info->has_vaddr;
515}
516
517bool getMUBUFHasSrsrc(unsigned Opc) {
518 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
519 return Info && Info->has_srsrc;
520}
521
522bool getMUBUFHasSoffset(unsigned Opc) {
523 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
524 return Info && Info->has_soffset;
525}
526
527bool getMUBUFIsBufferInv(unsigned Opc) {
528 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
529 return Info && Info->IsBufferInv;
530}
531
532bool getMUBUFTfe(unsigned Opc) {
533 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
534 return Info && Info->tfe;
535}
536
537bool getSMEMIsBuffer(unsigned Opc) {
538 const SMInfo *Info = getSMEMOpcodeHelper(Opc);
539 return Info && Info->IsBuffer;
540}
541
542bool getVOP1IsSingle(unsigned Opc) {
543 const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
544 return !Info || Info->IsSingle;
545}
546
547bool getVOP2IsSingle(unsigned Opc) {
548 const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
549 return !Info || Info->IsSingle;
550}
551
552bool getVOP3IsSingle(unsigned Opc) {
553 const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
554 return !Info || Info->IsSingle;
555}
556
557bool isVOPC64DPP(unsigned Opc) {
558 return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
559}
560
561bool isVOPCAsmOnly(unsigned Opc) { return isVOPCAsmOnlyOpcodeHelper(Opc); }
562
563bool getMAIIsDGEMM(unsigned Opc) {
564 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
565 return Info && Info->is_dgemm;
566}
567
568bool getMAIIsGFX940XDL(unsigned Opc) {
569 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
570 return Info && Info->is_gfx940_xdl;
571}
572
573bool getWMMAIsXDL(unsigned Opc) {
574 const WMMAInstInfo *Info = getWMMAInstInfoHelper(Opc);
575 return Info ? Info->is_wmma_xdl : false;
576}
577
579 switch (EncodingVal) {
582 return 6;
584 return 4;
587 default:
588 return 8;
589 }
590
591 llvm_unreachable("covered switch over mfma scale formats");
592}
593
595 unsigned BLGP,
596 unsigned F8F8Opcode) {
597 uint8_t SrcANumRegs = mfmaScaleF8F6F4FormatToNumRegs(CBSZ);
598 uint8_t SrcBNumRegs = mfmaScaleF8F6F4FormatToNumRegs(BLGP);
599 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
600}
601
603 switch (Fmt) {
606 return 16;
609 return 12;
611 return 8;
612 }
613
614 llvm_unreachable("covered switch over wmma scale formats");
615}
616
618 unsigned FmtB,
619 unsigned F8F8Opcode) {
620 uint8_t SrcANumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtA);
621 uint8_t SrcBNumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtB);
622 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
623}
624
625unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST) {
626 if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
627 return SIEncodingFamily::GFX1250;
628 if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
629 return SIEncodingFamily::GFX12;
630 if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))
631 return SIEncodingFamily::GFX11;
632 llvm_unreachable("Subtarget generation does not support VOPD!");
633}
634
635CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3) {
636 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
637 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
638 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
639 if (Info) {
640 // Check that Opc can be used as VOPDY for this encoding. V_MOV_B32 as a
641 // VOPDX is just a placeholder here, it is supported on all encodings.
642 // TODO: This can be optimized by creating tables of supported VOPDY
643 // opcodes per encoding.
644 unsigned VOPDMov = AMDGPU::getVOPDOpcode(AMDGPU::V_MOV_B32_e32, VOPD3);
645 bool CanBeVOPDY = getVOPDFull(VOPDMov, AMDGPU::getVOPDOpcode(Opc, VOPD3),
646 EncodingFamily, VOPD3) != -1;
647 return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};
648 }
649
650 return {false, false};
651}
652
653unsigned getVOPDOpcode(unsigned Opc, bool VOPD3) {
654 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
655 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
656 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
657 return Info ? Info->VOPDOp : ~0u;
658}
659
660bool isVOPD(unsigned Opc) {
661 return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
662}
663
664bool isMAC(unsigned Opc) {
665 return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
666 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
667 Opc == AMDGPU::V_MAC_F32_e64_vi ||
668 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
669 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
670 Opc == AMDGPU::V_MAC_F16_e64_vi ||
671 Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
672 Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
673 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
674 Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
675 Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
676 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
677 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
678 Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
679 Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
680 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
681 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
682 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
683 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
684 Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
685 Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
686 Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
687 Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
688 Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
689}
690
691bool isPermlane16(unsigned Opc) {
692 return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
693 Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
694 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
695 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
696 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
697 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
698 Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
699 Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;
700}
701
702bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc) {
703 return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
704 Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
705 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
706 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
707 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
708 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
709 Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
710 Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
711 Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
712 Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;
713}
714
715bool isGenericAtomic(unsigned Opc) {
716 return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
717 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
718 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
719 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
720 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
721 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
722 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
723 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
724 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
725 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
726 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
727 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
728 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
729 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
730 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
731 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
732 Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
733}
734
735bool isAsyncStore(unsigned Opc) {
736 return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
737 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
738 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
739 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
740 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
741 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
742 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
743 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;
744}
745
746bool isTensorStore(unsigned Opc) {
747 return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
748 Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
749}
750
751unsigned getTemporalHintType(const MCInstrDesc TID) {
754 unsigned Opc = TID.getOpcode();
755 // Async and Tensor store should have the temporal hint type of TH_TYPE_STORE
756 if (TID.mayStore() &&
757 (isAsyncStore(Opc) || isTensorStore(Opc) || !TID.mayLoad()))
758 return CPol::TH_TYPE_STORE;
759
760 // This will default to returning TH_TYPE_LOAD when neither MayStore nor
761 // MayLoad flag is present which is the case with instructions like
762 // image_get_resinfo.
763 return CPol::TH_TYPE_LOAD;
764}
765
766bool isTrue16Inst(unsigned Opc) {
767 const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
768 return Info && Info->IsTrue16;
769}
770
771FPType getFPDstSelType(unsigned Opc) {
772 const FP4FP8DstByteSelInfo *Info = getFP4FP8DstByteSelHelper(Opc);
773 if (!Info)
774 return FPType::None;
775 if (Info->HasFP8DstByteSel)
776 return FPType::FP8;
777 if (Info->HasFP4DstByteSel)
778 return FPType::FP4;
779
780 return FPType::None;
781}
782
783unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
784 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
785 return Info ? Info->Opcode3Addr : ~0u;
786}
787
788unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
789 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
790 return Info ? Info->Opcode2Addr : ~0u;
791}
792
793// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
794// header files, so we need to wrap it in a function that takes unsigned
795// instead.
796int getMCOpcode(uint16_t Opcode, unsigned Gen) {
797 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
798}
799
800unsigned getBitOp2(unsigned Opc) {
801 switch (Opc) {
802 default:
803 return 0;
804 case AMDGPU::V_AND_B32_e32:
805 return 0x40;
806 case AMDGPU::V_OR_B32_e32:
807 return 0x54;
808 case AMDGPU::V_XOR_B32_e32:
809 return 0x14;
810 case AMDGPU::V_XNOR_B32_e32:
811 return 0x41;
812 }
813}
814
815int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
816 bool VOPD3) {
817 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
818 OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
819 const VOPDInfo *Info =
820 getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
821 return Info ? Info->Opcode : -1;
822}
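// Usage sketch (illustrative; assumes an MCSubtargetInfo `STI` for a
// VOPD-capable target): checking whether two VOP opcodes can be fused into a
// single VOPD pair for the subtarget's encoding family.
//   unsigned OpX = getVOPDOpcode(AMDGPU::V_MOV_B32_e32, /*VOPD3=*/false);
//   unsigned OpY = getVOPDOpcode(AMDGPU::V_ADD_F32_e32, /*VOPD3=*/false);
//   int Fused = getVOPDFull(OpX, OpY, getVOPDEncodingFamily(STI),
//                           /*VOPD3=*/false);
//   // Fused is the fused VOPD opcode, or -1 if this pair cannot be formed.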
823
824std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
825 const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
826 assert(Info);
827 const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
828 const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
829 assert(OpX && OpY);
830 return {OpX->BaseVOP, OpY->BaseVOP};
831}
832
833namespace VOPD {
834
835ComponentProps::ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout) {
837
840 auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
841 assert(TiedIdx == -1 || TiedIdx == Component::DST);
842 HasSrc2Acc = TiedIdx != -1;
843 Opcode = OpDesc.getOpcode();
844
845 IsVOP3 = VOP3Layout || (OpDesc.TSFlags & SIInstrFlags::VOP3);
846 SrcOperandsNum = AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2) ? 3
847 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm) ? 3
848 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1) ? 2
849 : 1;
850 assert(SrcOperandsNum <= Component::MAX_SRC_NUM);
851
852 if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
853 Opcode == AMDGPU::V_CNDMASK_B32_e64) {
854 // CNDMASK is an awkward exception, it has FP modifiers, but not FP
855 // operands.
856 NumVOPD3Mods = 2;
857 if (IsVOP3)
858 SrcOperandsNum = 3;
859 } else if (isSISrcFPOperand(OpDesc,
860 getNamedOperandIdx(Opcode, OpName::src0))) {
861 // All FP VOPD instructions have Neg modifiers for all operands except
862 // for tied src2.
863 NumVOPD3Mods = SrcOperandsNum;
864 if (HasSrc2Acc)
865 --NumVOPD3Mods;
866 }
867
868 if (OpDesc.TSFlags & SIInstrFlags::VOP3)
869 return;
870
871 auto OperandsNum = OpDesc.getNumOperands();
872 unsigned CompOprIdx;
873 for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
874 if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
875 MandatoryLiteralIdx = CompOprIdx;
876 break;
877 }
878 }
879}
880
882 return getNamedOperandIdx(Opcode, OpName::bitop3);
883}
884
885unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
886 assert(CompOprIdx < Component::MAX_OPR_NUM);
887
888 if (CompOprIdx == Component::DST)
889 return getIndexOfDstInParsedOperands();
890
891 auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
892 if (CompSrcIdx < getCompParsedSrcOperandsNum())
893 return getIndexOfSrcInParsedOperands(CompSrcIdx);
894
895 // The specified operand does not exist.
896 return 0;
897}
898
899std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
900 std::function<unsigned(unsigned, unsigned)> GetRegIdx,
901 const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
902 bool VOPD3) const {
903
904 auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx,
905 CompInfo[ComponentIndex::X].isVOP3());
906 auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx,
907 CompInfo[ComponentIndex::Y].isVOP3());
908
909 const auto banksOverlap = [&MRI](MCRegister X, MCRegister Y,
910 unsigned BanksMask) -> bool {
911 MCRegister BaseX = MRI.getSubReg(X, AMDGPU::sub0);
912 MCRegister BaseY = MRI.getSubReg(Y, AMDGPU::sub0);
913 if (!BaseX)
914 BaseX = X;
915 if (!BaseY)
916 BaseY = Y;
917 if ((BaseX & BanksMask) == (BaseY & BanksMask))
918 return true;
919 if (BaseX != X /* This is 64-bit register */ &&
920 ((BaseX + 1) & BanksMask) == (BaseY & BanksMask))
921 return true;
922 if (BaseY != Y && (BaseX & BanksMask) == ((BaseY + 1) & BanksMask))
923 return true;
924
925 // If both operands are 64-bit registers, a bank conflict will already have
926 // been detected while checking the first subreg.
927 return false;
928 };
929
930 unsigned CompOprIdx;
931 for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
932 unsigned BanksMasks = VOPD3 ? VOPD3_VGPR_BANK_MASKS[CompOprIdx]
933 : VOPD_VGPR_BANK_MASKS[CompOprIdx];
934 if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
935 continue;
936
937 if (getVGPREncodingMSBs(OpXRegs[CompOprIdx], MRI) !=
938 getVGPREncodingMSBs(OpYRegs[CompOprIdx], MRI))
939 return CompOprIdx;
940
941 if (SkipSrc && CompOprIdx >= Component::DST_NUM)
942 continue;
943
944 if (CompOprIdx < Component::DST_NUM) {
945 // Even if we do not check vdst parity, vdst operands still shall not
946 // overlap.
947 if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
948 return CompOprIdx;
949 if (VOPD3) // No need to check dst parity.
950 continue;
951 }
952
953 if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
954 (!AllowSameVGPR || CompOprIdx < Component::DST_NUM ||
955 OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))
956 return CompOprIdx;
957 }
958
959 return {};
960}
961
962// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
963// by the specified component. If an operand is unused
964// or is not a VGPR, the corresponding value is 0.
965//
966// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
967// for the specified component and MC operand. The callback must return 0
968// if the operand is not a register or not a VGPR.
969InstInfo::RegIndices
970InstInfo::getRegIndices(unsigned CompIdx,
971 std::function<unsigned(unsigned, unsigned)> GetRegIdx,
972 bool VOPD3) const {
973 assert(CompIdx < COMPONENTS_NUM);
974
975 const auto &Comp = CompInfo[CompIdx];
976 InstInfo::RegIndices RegIndices;
977
978 RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());
979
980 for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
981 unsigned CompSrcIdx = CompOprIdx - DST_NUM;
982 RegIndices[CompOprIdx] =
983 Comp.hasRegSrcOperand(CompSrcIdx)
984 ? GetRegIdx(CompIdx,
985 Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
986 : 0;
987 }
988 return RegIndices;
989}
990
991} // namespace VOPD
992
994 return VOPD::InstInfo(OpX, OpY);
995}
996
997VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
998 const MCInstrInfo *InstrInfo) {
999 auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
1000 const auto &OpXDesc = InstrInfo->get(OpX);
1001 const auto &OpYDesc = InstrInfo->get(OpY);
1002 bool VOPD3 = InstrInfo->get(VOPDOpcode).TSFlags & SIInstrFlags::VOPD3;
1003 VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD3);
1004 VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo, VOPD3);
1005 return VOPD::InstInfo(OpXInfo, OpYInfo);
1006}
1007
1008namespace IsaInfo {
1009
1010AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
1011 : STI(STI), XnackSetting(TargetIDSetting::Any),
1012 SramEccSetting(TargetIDSetting::Any) {
1013 if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
1014 XnackSetting = TargetIDSetting::Unsupported;
1015 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
1016 SramEccSetting = TargetIDSetting::Unsupported;
1017}
1018
1019void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
1020 // Check if xnack or sramecc is explicitly enabled or disabled. In the
1021 // absence of the target features we assume we must generate code that can run
1022 // in any environment.
1023 SubtargetFeatures Features(FS);
1024 std::optional<bool> XnackRequested;
1025 std::optional<bool> SramEccRequested;
1026
1027 for (const std::string &Feature : Features.getFeatures()) {
1028 if (Feature == "+xnack")
1029 XnackRequested = true;
1030 else if (Feature == "-xnack")
1031 XnackRequested = false;
1032 else if (Feature == "+sramecc")
1033 SramEccRequested = true;
1034 else if (Feature == "-sramecc")
1035 SramEccRequested = false;
1036 }
1037
1038 bool XnackSupported = isXnackSupported();
1039 bool SramEccSupported = isSramEccSupported();
1040
1041 if (XnackRequested) {
1042 if (XnackSupported) {
1043 XnackSetting =
1044 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1045 } else {
1046 // If a specific xnack setting was requested and this GPU does not support
1047 // xnack emit a warning. Setting will remain set to "Unsupported".
1048 if (*XnackRequested) {
1049 errs() << "warning: xnack 'On' was requested for a processor that does "
1050 "not support it!\n";
1051 } else {
1052 errs() << "warning: xnack 'Off' was requested for a processor that "
1053 "does not support it!\n";
1054 }
1055 }
1056 }
1057
1058 if (SramEccRequested) {
1059 if (SramEccSupported) {
1060 SramEccSetting =
1061 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1062 } else {
1063 // If a specific sramecc setting was requested and this GPU does not
1064 // support sramecc emit a warning. Setting will remain set to
1065 // "Unsupported".
1066 if (*SramEccRequested) {
1067 errs() << "warning: sramecc 'On' was requested for a processor that "
1068 "does not support it!\n";
1069 } else {
1070 errs() << "warning: sramecc 'Off' was requested for a processor that "
1071 "does not support it!\n";
1072 }
1073 }
1074 }
1075}
1076
1077static TargetIDSetting
1078getTargetIDSettingFromFeatureString(StringRef FeatureString) {
1079 if (FeatureString.ends_with("-"))
1080 return TargetIDSetting::Off;
1081 if (FeatureString.ends_with("+"))
1082 return TargetIDSetting::On;
1083
1084 llvm_unreachable("Malformed feature string");
1085}
1086
1087void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
1088 SmallVector<StringRef, 3> TargetIDSplit;
1089 TargetID.split(TargetIDSplit, ':');
1090
1091 for (const auto &FeatureString : TargetIDSplit) {
1092 if (FeatureString.starts_with("xnack"))
1093 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
1094 if (FeatureString.starts_with("sramecc"))
1095 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
1096 }
1097}
1098
1099std::string AMDGPUTargetID::toString() const {
1100 std::string StringRep;
1101 raw_string_ostream StreamRep(StringRep);
1102
1103 auto TargetTriple = STI.getTargetTriple();
1104 auto Version = getIsaVersion(STI.getCPU());
1105
1106 StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
1107 << '-' << TargetTriple.getOSName() << '-'
1108 << TargetTriple.getEnvironmentName() << '-';
1109
1110 std::string Processor;
1111 // TODO: The following else statement is present here because we used various
1112 // alias names for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
1113 // Remove once all aliases are removed from GCNProcessors.td.
1114 if (Version.Major >= 9)
1115 Processor = STI.getCPU().str();
1116 else
1117 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
1118 Twine(Version.Stepping))
1119 .str();
1120
1121 std::string Features;
1122 if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
1123 // sramecc.
1124 if (getSramEccSetting() == TargetIDSetting::Off)
1125 Features += ":sramecc-";
1126 else if (getSramEccSetting() == TargetIDSetting::On)
1127 Features += ":sramecc+";
1128 // xnack.
1129 if (getXnackSetting() == TargetIDSetting::Off)
1130 Features += ":xnack-";
1131 else if (getXnackSetting() == TargetIDSetting::On)
1132 Features += ":xnack+";
1133 }
1134
1135 StreamRep << Processor << Features;
1136
1137 return StringRep;
1138}
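// Example of the resulting target ID (illustrative): for a gfx90a HSA target
// with SRAMECC on and XNACK off this yields
//   "amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-"
// (the environment component is empty, hence the consecutive dashes).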
1139
1140unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
1141 if (STI->getFeatureBits().test(FeatureWavefrontSize16))
1142 return 16;
1143 if (STI->getFeatureBits().test(FeatureWavefrontSize32))
1144 return 32;
1145
1146 return 64;
1147}
1148
1149unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
1150 unsigned BytesPerCU = getAddressableLocalMemorySize(STI);
1151
1152 // "Per CU" really means "per whatever functional block the waves of a
1153 // workgroup must share". So the effective local memory size is doubled in
1154 // WGP mode on gfx10.
1155 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1156 BytesPerCU *= 2;
1157
1158 return BytesPerCU;
1159}
1160
1161unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
1162 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
1163 return 32768;
1164 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
1165 return 65536;
1166 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
1167 return 163840;
1168 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
1169 return 327680;
1170 return 32768;
1171}
1172
1173unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
1174 // "Per CU" really means "per whatever functional block the waves of a
1175 // workgroup must share".
1176
1177 // GFX12.5 only supports CU mode, which contains four SIMDs.
1178 if (isGFX1250(*STI)) {
1179 assert(STI->getFeatureBits().test(FeatureCuMode));
1180 return 4;
1181 }
1182
1183 // For gfx10 in CU mode the functional block is the CU, which contains
1184 // two SIMDs.
1185 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
1186 return 2;
1187
1188 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP
1189 // contains two CUs, so a total of four SIMDs.
1190 return 4;
1191}
1192
1193unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
1194 unsigned FlatWorkGroupSize) {
1195 assert(FlatWorkGroupSize != 0);
1196 if (!STI->getTargetTriple().isAMDGCN())
1197 return 8;
1198 unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
1199 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
1200 if (N == 1) {
1201 // Single-wave workgroups don't consume barrier resources.
1202 return MaxWaves;
1203 }
1204
1205 unsigned MaxBarriers = 16;
1206 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1207 MaxBarriers = 32;
1208
1209 return std::min(MaxWaves / N, MaxBarriers);
1210}
1211
1212unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }
1213
1214unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
1215 // FIXME: Need to take scratch memory into account.
1216 if (isGFX90A(*STI))
1217 return 8;
1218 if (!isGFX10Plus(*STI))
1219 return 10;
1220 return hasGFX10_3Insts(*STI) ? 16 : 20;
1221}
1222
1224 unsigned FlatWorkGroupSize) {
1225 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
1226 getEUsPerCU(STI));
1227}
1228
1229unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }
1230
1231unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
1232 // Some subtargets allow encoding 2048, but this isn't tested or supported.
1233 return 1024;
1234}
1235
1236unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
1237 unsigned FlatWorkGroupSize) {
1238 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
1239}
1240
1241unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
1242 IsaVersion Version = getIsaVersion(STI->getCPU());
1243 if (Version.Major >= 10)
1244 return getAddressableNumSGPRs(STI);
1245 if (Version.Major >= 8)
1246 return 16;
1247 return 8;
1248}
1249
1250unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }
1251
1252unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
1253 IsaVersion Version = getIsaVersion(STI->getCPU());
1254 if (Version.Major >= 8)
1255 return 800;
1256 return 512;
1257}
1258
1259unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
1260 if (STI->getFeatureBits().test(FeatureSGPRInitBug))
1261 return FIXED_NUM_SGPRS_FOR_INIT_BUG;
1262
1263 IsaVersion Version = getIsaVersion(STI->getCPU());
1264 if (Version.Major >= 10)
1265 return 106;
1266 if (Version.Major >= 8)
1267 return 102;
1268 return 104;
1269}
1270
1271unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
1272 assert(WavesPerEU != 0);
1273
1274 IsaVersion Version = getIsaVersion(STI->getCPU());
1275 if (Version.Major >= 10)
1276 return 0;
1277
1278 if (WavesPerEU >= getMaxWavesPerEU(STI))
1279 return 0;
1280
1281 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
1282 if (STI->getFeatureBits().test(FeatureTrapHandler))
1283 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1284 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
1285 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
1286}
1287
1288unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1289 bool Addressable) {
1290 assert(WavesPerEU != 0);
1291
1292 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
1293 IsaVersion Version = getIsaVersion(STI->getCPU());
1294 if (Version.Major >= 10)
1295 return Addressable ? AddressableNumSGPRs : 108;
1296 if (Version.Major >= 8 && !Addressable)
1297 AddressableNumSGPRs = 112;
1298 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
1299 if (STI->getFeatureBits().test(FeatureTrapHandler))
1300 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1301 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
1302 return std::min(MaxNumSGPRs, AddressableNumSGPRs);
1303}
1304
1305unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1306 bool FlatScrUsed, bool XNACKUsed) {
1307 unsigned ExtraSGPRs = 0;
1308 if (VCCUsed)
1309 ExtraSGPRs = 2;
1310
1311 IsaVersion Version = getIsaVersion(STI->getCPU());
1312 if (Version.Major >= 10)
1313 return ExtraSGPRs;
1314
1315 if (Version.Major < 8) {
1316 if (FlatScrUsed)
1317 ExtraSGPRs = 4;
1318 } else {
1319 if (XNACKUsed)
1320 ExtraSGPRs = 4;
1321
1322 if (FlatScrUsed ||
1323 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
1324 ExtraSGPRs = 6;
1325 }
1326
1327 return ExtraSGPRs;
1328}
1329
1330unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1331 bool FlatScrUsed) {
1332 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
1333 STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
1334}
1335
1336static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs,
1337 unsigned Granule) {
1338 return divideCeil(std::max(1u, NumRegs), Granule);
1339}
1340
1341unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
1342 // SGPRBlocks is the actual number of SGPR blocks minus 1.
1343 return getGranulatedNumRegisterBlocks(NumSGPRs, getSGPREncodingGranule(STI)) -
1344 1;
1345}
1346
1347unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
1348 unsigned DynamicVGPRBlockSize,
1349 std::optional<bool> EnableWavefrontSize32) {
1350 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1351 return 8;
1352
1353 if (DynamicVGPRBlockSize != 0)
1354 return DynamicVGPRBlockSize;
1355
1356 bool IsWave32 = EnableWavefrontSize32
1357 ? *EnableWavefrontSize32
1358 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1359
1360 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1361 return IsWave32 ? 24 : 12;
1362
1363 if (hasGFX10_3Insts(*STI))
1364 return IsWave32 ? 16 : 8;
1365
1366 return IsWave32 ? 8 : 4;
1367}
1368
1369unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
1370 std::optional<bool> EnableWavefrontSize32) {
1371 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1372 return 8;
1373
1374 bool IsWave32 = EnableWavefrontSize32
1375 ? *EnableWavefrontSize32
1376 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1377
1378 if (STI->getFeatureBits().test(Feature1024AddressableVGPRs))
1379 return IsWave32 ? 16 : 8;
1380
1381 return IsWave32 ? 8 : 4;
1382}
1383
1384unsigned getArchVGPRAllocGranule() { return 4; }
1385
1386unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
1387 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1388 return 512;
1389 if (!isGFX10Plus(*STI))
1390 return 256;
1391 bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
1392 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1393 return IsWave32 ? 1536 : 768;
1394 return IsWave32 ? 1024 : 512;
1395}
1396
1397unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI) {
1398 const auto &Features = STI->getFeatureBits();
1399 if (Features.test(Feature1024AddressableVGPRs))
1400 return Features.test(FeatureWavefrontSize32) ? 1024 : 512;
1401 return 256;
1402}
1403
1404unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
1405 unsigned DynamicVGPRBlockSize) {
1406 const auto &Features = STI->getFeatureBits();
1407 if (Features.test(FeatureGFX90AInsts))
1408 return 512;
1409
1410 if (DynamicVGPRBlockSize != 0)
1411 // On GFX12 we can allocate at most 8 blocks of VGPRs.
1412 return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1413 return getAddressableNumArchVGPRs(STI);
1414}
1415
1416unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
1417 unsigned NumVGPRs,
1418 unsigned DynamicVGPRBlockSize) {
1419 return getNumWavesPerEUWithNumVGPRs(
1420 NumVGPRs, getVGPRAllocGranule(STI, DynamicVGPRBlockSize),
1421 getMaxWavesPerEU(STI), getTotalNumVGPRs(STI));
1422}
1423
1424unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
1425 unsigned MaxWaves,
1426 unsigned TotalNumVGPRs) {
1427 if (NumVGPRs < Granule)
1428 return MaxWaves;
1429 unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
1430 return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
1431}
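// Worked example (illustrative): with Granule = 8, TotalNumVGPRs = 512 and
// MaxWaves = 8, a kernel using 100 VGPRs rounds up to alignTo(100, 8) = 104
// allocated VGPRs, so 512 / 104 = 4 waves fit on each EU (clamped to
// [1, MaxWaves]).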
1432
1433unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
1434 AMDGPUSubtarget::Generation Gen) {
1435 if (Gen >= AMDGPUSubtarget::GFX10)
1436 return MaxWaves;
1437
1438 if (Gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1439 if (SGPRs <= 80)
1440 return 10;
1441 if (SGPRs <= 88)
1442 return 9;
1443 if (SGPRs <= 100)
1444 return 8;
1445 return 7;
1446 }
1447 if (SGPRs <= 48)
1448 return 10;
1449 if (SGPRs <= 56)
1450 return 9;
1451 if (SGPRs <= 64)
1452 return 8;
1453 if (SGPRs <= 72)
1454 return 7;
1455 if (SGPRs <= 80)
1456 return 6;
1457 return 5;
1458}
1459
1460unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1461 unsigned DynamicVGPRBlockSize) {
1462 assert(WavesPerEU != 0);
1463
1464 unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
1465 if (WavesPerEU >= MaxWavesPerEU)
1466 return 0;
1467
1468 unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
1469 unsigned AddrsableNumVGPRs =
1470 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1471 unsigned Granule = getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1472 unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
1473
1474 if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
1475 return 0;
1476
1477 unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs,
1478 DynamicVGPRBlockSize);
1479 if (WavesPerEU < MinWavesPerEU)
1480 return getMinNumVGPRs(STI, MinWavesPerEU, DynamicVGPRBlockSize);
1481
1482 unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
1483 unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
1484 return std::min(MinNumVGPRs, AddrsableNumVGPRs);
1485}
1486
1487unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1488 unsigned DynamicVGPRBlockSize) {
1489 assert(WavesPerEU != 0);
1490
1491 unsigned MaxNumVGPRs =
1492 alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
1493 getVGPRAllocGranule(STI, DynamicVGPRBlockSize));
1494 unsigned AddressableNumVGPRs =
1495 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1496 return std::min(MaxNumVGPRs, AddressableNumVGPRs);
1497}
1498
1499unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
1500 std::optional<bool> EnableWavefrontSize32) {
1501 return getGranulatedNumRegisterBlocks(
1502 NumVGPRs, getVGPREncodingGranule(STI, EnableWavefrontSize32)) -
1503 1;
1504}
1505
1506unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
1507 unsigned NumVGPRs,
1508 unsigned DynamicVGPRBlockSize,
1509 std::optional<bool> EnableWavefrontSize32) {
1510 return getGranulatedNumRegisterBlocks(
1511 NumVGPRs,
1512 getVGPRAllocGranule(STI, DynamicVGPRBlockSize, EnableWavefrontSize32));
1513}
1514} // end namespace IsaInfo
1515
1517 const MCSubtargetInfo *STI) {
1519 KernelCode.amd_kernel_code_version_major = 1;
1520 KernelCode.amd_kernel_code_version_minor = 2;
1521 KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
1522 KernelCode.amd_machine_version_major = Version.Major;
1523 KernelCode.amd_machine_version_minor = Version.Minor;
1524 KernelCode.amd_machine_version_stepping = Version.Stepping;
1526 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
1527 KernelCode.wavefront_size = 5;
1529 } else {
1530 KernelCode.wavefront_size = 6;
1531 }
1532
1533 // If the code object does not support indirect functions, then the value must
1534 // be 0xffffffff.
1535 KernelCode.call_convention = -1;
1536
1537 // These alignment values are specified in powers of two, so alignment =
1538 // 2^n. The minimum alignment is 2^4 = 16.
1539 KernelCode.kernarg_segment_alignment = 4;
1540 KernelCode.group_segment_alignment = 4;
1541 KernelCode.private_segment_alignment = 4;
1542
1543 if (Version.Major >= 10) {
1544 KernelCode.compute_pgm_resource_registers |=
1545 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
1546 S_00B848_MEM_ORDERED(1);
1547 }
1548}
1549
1552}
1553
1556}
1557
1558bool isReadOnlySegment(const GlobalValue *GV) {
1559 unsigned AS = GV->getAddressSpace();
1560 return AS == AMDGPUAS::CONSTANT_ADDRESS ||
1561 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
1562}
1563
1564bool shouldEmitConstantsToTextSection(const Triple &TT) {
1565 return TT.getArch() == Triple::r600;
1566}
1567
1568static bool isValidRegPrefix(char C) {
1569 return C == 'v' || C == 's' || C == 'a';
1570}
1571
1572std::tuple<char, unsigned, unsigned> parseAsmPhysRegName(StringRef RegName) {
1573 char Kind = RegName.front();
1574 if (!isValidRegPrefix(Kind))
1575 return {};
1576
1577 RegName = RegName.drop_front();
1578 if (RegName.consume_front("[")) {
1579 unsigned Idx, End;
1580 bool Failed = RegName.consumeInteger(10, Idx);
1581 Failed |= !RegName.consume_front(":");
1582 Failed |= RegName.consumeInteger(10, End);
1583 Failed |= !RegName.consume_back("]");
1584 if (!Failed) {
1585 unsigned NumRegs = End - Idx + 1;
1586 if (NumRegs > 1)
1587 return {Kind, Idx, NumRegs};
1588 }
1589 } else {
1590 unsigned Idx;
1591 bool Failed = RegName.getAsInteger(10, Idx);
1592 if (!Failed)
1593 return {Kind, Idx, 1};
1594 }
1595
1596 return {};
1597}
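// Illustrative examples (not part of the upstream sources):
//   parseAsmPhysRegName("v[8:11]") -> {'v', 8, 4}   // a 4-register VGPR tuple
//   parseAsmPhysRegName("s17")     -> {'s', 17, 1}
//   parseAsmPhysRegName("x0")      -> {}             // unknown register prefix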
1598
1599std::tuple<char, unsigned, unsigned>
1600parseAsmConstraintPhysReg(StringRef Constraint) {
1601 StringRef RegName = Constraint;
1602 if (!RegName.consume_front("{") || !RegName.consume_back("}"))
1603 return {};
1604 return parseAsmPhysRegName(RegName);
1605}
1606
1607std::pair<unsigned, unsigned>
1608getIntegerPairAttribute(const Function &F, StringRef Name,
1609 std::pair<unsigned, unsigned> Default,
1610 bool OnlyFirstRequired) {
1611 if (auto Attr = getIntegerPairAttribute(F, Name, OnlyFirstRequired))
1612 return {Attr->first, Attr->second.value_or(Default.second)};
1613 return Default;
1614}
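// Usage sketch (illustrative): querying a "first[,second]" string attribute
// such as "amdgpu-flat-work-group-sizes"="128,256" from a Function F.
//   auto [Min, Max] = getIntegerPairAttribute(
//       F, "amdgpu-flat-work-group-sizes", /*Default=*/{1, 1024});
//   // Min == 128, Max == 256; the defaults are returned when the attribute
//   // is absent or does not parse.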
1615
1616std::optional<std::pair<unsigned, std::optional<unsigned>>>
1617getIntegerPairAttribute(const Function &F, StringRef Name,
1618 bool OnlyFirstRequired) {
1619 Attribute A = F.getFnAttribute(Name);
1620 if (!A.isStringAttribute())
1621 return std::nullopt;
1622
1623 LLVMContext &Ctx = F.getContext();
1624 std::pair<unsigned, std::optional<unsigned>> Ints;
1625 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
1626 if (Strs.first.trim().getAsInteger(0, Ints.first)) {
1627 Ctx.emitError("can't parse first integer attribute " + Name);
1628 return std::nullopt;
1629 }
1630 unsigned Second = 0;
1631 if (Strs.second.trim().getAsInteger(0, Second)) {
1632 if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
1633 Ctx.emitError("can't parse second integer attribute " + Name);
1634 return std::nullopt;
1635 }
1636 } else {
1637 Ints.second = Second;
1638 }
1639
1640 return Ints;
1641}
1642
1643SmallVector<unsigned> getIntegerVecAttribute(const Function &F, StringRef Name,
1644 unsigned Size,
1645 unsigned DefaultVal) {
1646 std::optional<SmallVector<unsigned>> R =
1647 getIntegerVecAttribute(F, Name, Size);
1648 return R.has_value() ? *R : SmallVector<unsigned>(Size, DefaultVal);
1649}
1650
1651std::optional<SmallVector<unsigned>>
1652getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size) {
1653 assert(Size > 2);
1654 LLVMContext &Ctx = F.getContext();
1655
1656 Attribute A = F.getFnAttribute(Name);
1657 if (!A.isValid())
1658 return std::nullopt;
1659 if (!A.isStringAttribute()) {
1660 Ctx.emitError(Name + " is not a string attribute");
1661 return std::nullopt;
1662 }
1663
1664 SmallVector<unsigned> Vals(Size);
1665
1666 StringRef S = A.getValueAsString();
1667 unsigned i = 0;
1668 for (; !S.empty() && i < Size; i++) {
1669 std::pair<StringRef, StringRef> Strs = S.split(',');
1670 unsigned IntVal;
1671 if (Strs.first.trim().getAsInteger(0, IntVal)) {
1672 Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
1673 Name);
1674 return std::nullopt;
1675 }
1676 Vals[i] = IntVal;
1677 S = Strs.second;
1678 }
1679
1680 if (!S.empty() || i < Size) {
1681 Ctx.emitError("attribute " + Name +
1682 " has incorrect number of integers; expected " +
1683 Twine(Size));
1684 return std::nullopt;
1685 }
1686 return Vals;
1687}
1688
1689bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val) {
1690 assert((MD.getNumOperands() % 2 == 0) && "invalid number of operands!");
1691 for (unsigned I = 0, E = MD.getNumOperands() / 2; I != E; ++I) {
1692 auto Low =
1693 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 0))->getValue();
1694 auto High =
1695 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 1))->getValue();
1696 // There are two types of [A; B) ranges:
1697 // A < B, e.g. [4; 5) which is a range that only includes 4.
1698 // A > B, e.g. [5; 4) which is a range that wraps around and includes
1699 // everything except 4.
1700 if (Low.ult(High)) {
1701 if (Low.ule(Val) && High.ugt(Val))
1702 return true;
1703 } else {
1704 if (Low.uge(Val) && High.ult(Val))
1705 return true;
1706 }
1707 }
1708
1709 return false;
1710}
1711
1712unsigned getVmcntBitMask(const IsaVersion &Version) {
1713 return (1 << (getVmcntBitWidthLo(Version.Major) +
1714 getVmcntBitWidthHi(Version.Major))) -
1715 1;
1716}
1717
1718unsigned getLoadcntBitMask(const IsaVersion &Version) {
1719 return (1 << getLoadcntBitWidth(Version.Major)) - 1;
1720}
1721
1722unsigned getSamplecntBitMask(const IsaVersion &Version) {
1723 return (1 << getSamplecntBitWidth(Version.Major)) - 1;
1724}
1725
1726unsigned getBvhcntBitMask(const IsaVersion &Version) {
1727 return (1 << getBvhcntBitWidth(Version.Major)) - 1;
1728}
1729
1730unsigned getExpcntBitMask(const IsaVersion &Version) {
1731 return (1 << getExpcntBitWidth(Version.Major)) - 1;
1732}
1733
1734unsigned getLgkmcntBitMask(const IsaVersion &Version) {
1735 return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1736}
1737
1738unsigned getDscntBitMask(const IsaVersion &Version) {
1739 return (1 << getDscntBitWidth(Version.Major)) - 1;
1740}
1741
1742unsigned getKmcntBitMask(const IsaVersion &Version) {
1743 return (1 << getKmcntBitWidth(Version.Major)) - 1;
1744}
1745
1746unsigned getXcntBitMask(const IsaVersion &Version) {
1747 return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
1748}
1749
1750unsigned getStorecntBitMask(const IsaVersion &Version) {
1751 return (1 << getStorecntBitWidth(Version.Major)) - 1;
1752}
1753
1754unsigned getWaitcntBitMask(const IsaVersion &Version) {
1755 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1756 getVmcntBitWidthLo(Version.Major));
1757 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1758 getExpcntBitWidth(Version.Major));
1759 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1760 getLgkmcntBitWidth(Version.Major));
1761 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1762 getVmcntBitWidthHi(Version.Major));
1763 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1764}
1765
1766unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1767 unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1768 getVmcntBitWidthLo(Version.Major));
1769 unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1770 getVmcntBitWidthHi(Version.Major));
1771 return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1772}
1773
1774unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1775 return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1776 getExpcntBitWidth(Version.Major));
1777}
1778
1779unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1780 return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1781 getLgkmcntBitWidth(Version.Major));
1782}
1783
1784void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
1785 unsigned &Expcnt, unsigned &Lgkmcnt) {
1786 Vmcnt = decodeVmcnt(Version, Waitcnt);
1787 Expcnt = decodeExpcnt(Version, Waitcnt);
1788 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1789}
1790
1791Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1792 Waitcnt Decoded;
1793 Decoded.LoadCnt = decodeVmcnt(Version, Encoded);
1794 Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1795 Decoded.DsCnt = decodeLgkmcnt(Version, Encoded);
1796 return Decoded;
1797}
1798
1799unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1800 unsigned Vmcnt) {
1801 Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1802 getVmcntBitWidthLo(Version.Major));
1803 return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1804 getVmcntBitShiftHi(Version.Major),
1805 getVmcntBitWidthHi(Version.Major));
1806}
1807
1808unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1809 unsigned Expcnt) {
1810 return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1811 getExpcntBitWidth(Version.Major));
1812}
1813
1814unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1815 unsigned Lgkmcnt) {
1816 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1817 getLgkmcntBitWidth(Version.Major));
1818}
1819
1820unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
1821 unsigned Expcnt, unsigned Lgkmcnt) {
1822 unsigned Waitcnt = getWaitcntBitMask(Version);
1823 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1824 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1825 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1826 return Waitcnt;
1827}
1828
1829unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1830 return encodeWaitcnt(Version, Decoded.LoadCnt, Decoded.ExpCnt, Decoded.DsCnt);
1831}
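// Usage sketch (illustrative; assumes an MCSubtargetInfo `STI` for a
// pre-gfx12 target): the three legacy counters round-trip through one
// encoded waitcnt word.
//   IsaVersion IV = getIsaVersion(STI.getCPU());
//   unsigned Enc = encodeWaitcnt(IV, /*Vmcnt=*/0, /*Expcnt=*/7, /*Lgkmcnt=*/15);
//   assert(decodeVmcnt(IV, Enc) == 0 && decodeExpcnt(IV, Enc) == 7 &&
//          decodeLgkmcnt(IV, Enc) == 15);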
1832
1833static unsigned getCombinedCountBitMask(const IsaVersion &Version,
1834 bool IsStore) {
1835 unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
1836 getDscntBitWidth(Version.Major));
1837 if (IsStore) {
1838 unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1839 getStorecntBitWidth(Version.Major));
1840 return Dscnt | Storecnt;
1841 }
1842 unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1843 getLoadcntBitWidth(Version.Major));
1844 return Dscnt | Loadcnt;
1845}
1846
1847Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {
1848 Waitcnt Decoded;
1849 Decoded.LoadCnt =
1850 unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
1851 getLoadcntBitWidth(Version.Major));
1852 Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
1853 getDscntBitWidth(Version.Major));
1854 return Decoded;
1855}
1856
1857Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt) {
1858 Waitcnt Decoded;
1859 Decoded.StoreCnt =
1860 unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
1861 getStorecntBitWidth(Version.Major));
1862 Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
1863 getDscntBitWidth(Version.Major));
1864 return Decoded;
1865}
1866
1867static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt,
1868 unsigned Loadcnt) {
1869 return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1870 getLoadcntBitWidth(Version.Major));
1871}
1872
1873static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt,
1874 unsigned Storecnt) {
1875 return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1876 getStorecntBitWidth(Version.Major));
1877}
1878
1879static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt,
1880 unsigned Dscnt) {
1881 return packBits(Dscnt, Waitcnt, getDscntBitShift(Version.Major),
1882 getDscntBitWidth(Version.Major));
1883}
1884
1885static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt,
1886 unsigned Dscnt) {
1887 unsigned Waitcnt = getCombinedCountBitMask(Version, false);
1888 Waitcnt = encodeLoadcnt(Version, Waitcnt, Loadcnt);
1889 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1890 return Waitcnt;
1891}
1892
1893unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1894 return encodeLoadcntDscnt(Version, Decoded.LoadCnt, Decoded.DsCnt);
1895}
1896
1897static unsigned encodeStorecntDscnt(const IsaVersion &Version,
1898 unsigned Storecnt, unsigned Dscnt) {
1899 unsigned Waitcnt = getCombinedCountBitMask(Version, true);
1900 Waitcnt = encodeStorecnt(Version, Waitcnt, Storecnt);
1901 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1902 return Waitcnt;
1903}
1904
1905unsigned encodeStorecntDscnt(const IsaVersion &Version,
1906 const Waitcnt &Decoded) {
1907 return encodeStorecntDscnt(Version, Decoded.StoreCnt, Decoded.DsCnt);
1908}
1909
1910//===----------------------------------------------------------------------===//
1911// Custom Operand Values
1912//===----------------------------------------------------------------------===//
1913
1914static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1915 int Size,
1916 const MCSubtargetInfo &STI) {
1917 unsigned Enc = 0;
1918 for (int Idx = 0; Idx < Size; ++Idx) {
1919 const auto &Op = Opr[Idx];
1920 if (Op.isSupported(STI))
1921 Enc |= Op.encode(Op.Default);
1922 }
1923 return Enc;
1924}
1925
1926static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1927 int Size, unsigned Code,
1928 bool &HasNonDefaultVal,
1929 const MCSubtargetInfo &STI) {
1930 unsigned UsedOprMask = 0;
1931 HasNonDefaultVal = false;
1932 for (int Idx = 0; Idx < Size; ++Idx) {
1933 const auto &Op = Opr[Idx];
1934 if (!Op.isSupported(STI))
1935 continue;
1936 UsedOprMask |= Op.getMask();
1937 unsigned Val = Op.decode(Code);
1938 if (!Op.isValid(Val))
1939 return false;
1940 HasNonDefaultVal |= (Val != Op.Default);
1941 }
1942 return (Code & ~UsedOprMask) == 0;
1943}
1944
1945static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1946 unsigned Code, int &Idx, StringRef &Name,
1947 unsigned &Val, bool &IsDefault,
1948 const MCSubtargetInfo &STI) {
1949 while (Idx < Size) {
1950 const auto &Op = Opr[Idx++];
1951 if (Op.isSupported(STI)) {
1952 Name = Op.Name;
1953 Val = Op.decode(Code);
1954 IsDefault = (Val == Op.Default);
1955 return true;
1956 }
1957 }
1958
1959 return false;
1960}
1961
1962 static int encodeCustomOperandVal(const CustomOperandVal &Op,
1963 int64_t InputVal) {
1964 if (InputVal < 0 || InputVal > Op.Max)
1965 return OPR_VAL_INVALID;
1966 return Op.encode(InputVal);
1967}
1968
1969static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1970 const StringRef Name, int64_t InputVal,
1971 unsigned &UsedOprMask,
1972 const MCSubtargetInfo &STI) {
1973 int InvalidId = OPR_ID_UNKNOWN;
1974 for (int Idx = 0; Idx < Size; ++Idx) {
1975 const auto &Op = Opr[Idx];
1976 if (Op.Name == Name) {
1977 if (!Op.isSupported(STI)) {
1978 InvalidId = OPR_ID_UNSUPPORTED;
1979 continue;
1980 }
1981 auto OprMask = Op.getMask();
1982 if (OprMask & UsedOprMask)
1983 return OPR_ID_DUPLICATE;
1984 UsedOprMask |= OprMask;
1985 return encodeCustomOperandVal(Op, InputVal);
1986 }
1987 }
1988 return InvalidId;
1989}
1990
1991//===----------------------------------------------------------------------===//
1992// DepCtr
1993//===----------------------------------------------------------------------===//
1994
1995namespace DepCtr {
1996
1997 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1998 static int Default = -1;
1999 if (Default == -1)
2000 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
2001 return Default;
2002}
2003
2004bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
2005 const MCSubtargetInfo &STI) {
2006 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
2007 HasNonDefaultVal, STI);
2008}
2009
2010bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
2011 bool &IsDefault, const MCSubtargetInfo &STI) {
2012 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
2013 IsDefault, STI);
2014}
2015
2016int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
2017 const MCSubtargetInfo &STI) {
2018 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
2019 STI);
2020}
2021
2022unsigned decodeFieldVmVsrc(unsigned Encoded) {
2023 return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2024}
2025
2026unsigned decodeFieldVaVdst(unsigned Encoded) {
2027 return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2028}
2029
2030unsigned decodeFieldSaSdst(unsigned Encoded) {
2031 return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2032}
2033
2034unsigned decodeFieldVaSdst(unsigned Encoded) {
2035 return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2036}
2037
2038unsigned decodeFieldVaVcc(unsigned Encoded) {
2039 return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());
2040}
2041
2042unsigned decodeFieldVaSsrc(unsigned Encoded) {
2043 return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2044}
2045
2046unsigned decodeFieldHoldCnt(unsigned Encoded) {
2047 return unpackBits(Encoded, getHoldCntBitShift(), getHoldCntWidth());
2048}
2049
2050unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
2051 return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2052}
2053
2054unsigned encodeFieldVmVsrc(unsigned VmVsrc) {
2055 return encodeFieldVmVsrc(0xffff, VmVsrc);
2056}
2057
2058unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
2059 return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2060}
2061
2062unsigned encodeFieldVaVdst(unsigned VaVdst) {
2063 return encodeFieldVaVdst(0xffff, VaVdst);
2064}
2065
2066unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
2067 return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2068}
2069
2070unsigned encodeFieldSaSdst(unsigned SaSdst) {
2071 return encodeFieldSaSdst(0xffff, SaSdst);
2072}
2073
2074unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst) {
2075 return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2076}
2077
2078unsigned encodeFieldVaSdst(unsigned VaSdst) {
2079 return encodeFieldVaSdst(0xffff, VaSdst);
2080}
2081
2082unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc) {
2083 return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());
2084}
2085
2086unsigned encodeFieldVaVcc(unsigned VaVcc) {
2087 return encodeFieldVaVcc(0xffff, VaVcc);
2088}
2089
2090unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc) {
2091 return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2092}
2093
2094unsigned encodeFieldVaSsrc(unsigned VaSsrc) {
2095 return encodeFieldVaSsrc(0xffff, VaSsrc);
2096}
2097
2098unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt) {
2099 return packBits(HoldCnt, Encoded, getHoldCntBitShift(), getHoldCntWidth());
2100}
2101
2102unsigned encodeFieldHoldCnt(unsigned HoldCnt) {
2103 return encodeFieldHoldCnt(0xffff, HoldCnt);
2104}
2105
2106} // namespace DepCtr
2107
2108//===----------------------------------------------------------------------===//
2109// exp tgt
2110//===----------------------------------------------------------------------===//
2111
2112namespace Exp {
2113
2114struct ExpTgt {
2115 StringLiteral Name;
2116 unsigned Tgt;
2117 unsigned MaxIndex;
2118};
2119
2120// clang-format off
2121static constexpr ExpTgt ExpTgtInfo[] = {
2122 {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
2123 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
2124 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
2125 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
2126 {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
2127 {{"dual_src_blend"},ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
2128 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
2129};
2130// clang-format on
2131
2132bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
2133 for (const ExpTgt &Val : ExpTgtInfo) {
2134 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
2135 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
2136 Name = Val.Name;
2137 return true;
2138 }
2139 }
2140 return false;
2141}
2142
2143unsigned getTgtId(const StringRef Name) {
2144
2145 for (const ExpTgt &Val : ExpTgtInfo) {
2146 if (Val.MaxIndex == 0 && Name == Val.Name)
2147 return Val.Tgt;
2148
2149 if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
2150 StringRef Suffix = Name.drop_front(Val.Name.size());
2151
2152 unsigned Id;
2153 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
2154 return ET_INVALID;
2155
2156 // Reject leading zeroes in the index.
2157 if (Suffix.size() > 1 && Suffix[0] == '0')
2158 return ET_INVALID;
2159
2160 return Val.Tgt + Id;
2161 }
2162 }
2163 return ET_INVALID;
2164}
2165
2166bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
2167 switch (Id) {
2168 case ET_NULL:
2169 return !isGFX11Plus(STI);
2170 case ET_POS4:
2171 case ET_PRIM:
2172 return isGFX10Plus(STI);
2173 case ET_DUAL_SRC_BLEND0:
2174 case ET_DUAL_SRC_BLEND1:
2175 return isGFX11Plus(STI);
2176 default:
2177 if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
2178 return !isGFX11Plus(STI);
2179 return true;
2180 }
2181}
2182
2183} // namespace Exp
2184
2185//===----------------------------------------------------------------------===//
2186// MTBUF Format
2187//===----------------------------------------------------------------------===//
2188
2189namespace MTBUFFormat {
2190
2191int64_t getDfmt(const StringRef Name) {
2192 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
2193 if (Name == DfmtSymbolic[Id])
2194 return Id;
2195 }
2196 return DFMT_UNDEF;
2197}
2198
2199 StringRef getDfmtName(unsigned Id) {
2200 assert(Id <= DFMT_MAX);
2201 return DfmtSymbolic[Id];
2202}
2203
2204 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
2205 if (isSI(STI) || isCI(STI))
2206 return NfmtSymbolicSICI;
2207 if (isVI(STI) || isGFX9(STI))
2208 return NfmtSymbolicVI;
2209 return NfmtSymbolicGFX10;
2210}
2211
2212int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
2213 const auto *lookupTable = getNfmtLookupTable(STI);
2214 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
2215 if (Name == lookupTable[Id])
2216 return Id;
2217 }
2218 return NFMT_UNDEF;
2219}
2220
2221StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
2222 assert(Id <= NFMT_MAX);
2223 return getNfmtLookupTable(STI)[Id];
2224}
2225
2226bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2227 unsigned Dfmt;
2228 unsigned Nfmt;
2229 decodeDfmtNfmt(Id, Dfmt, Nfmt);
2230 return isValidNfmt(Nfmt, STI);
2231}
2232
2233bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2234 return !getNfmtName(Id, STI).empty();
2235}
2236
2237int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
2238 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
2239}
2240
2241void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
2242 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
2243 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
2244}
2245
2246int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
2247 if (isGFX11Plus(STI)) {
2248 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2249 if (Name == UfmtSymbolicGFX11[Id])
2250 return Id;
2251 }
2252 } else {
2253 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2254 if (Name == UfmtSymbolicGFX10[Id])
2255 return Id;
2256 }
2257 }
2258 return UFMT_UNDEF;
2259}
2260
2261 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
2262 if (isValidUnifiedFormat(Id, STI))
2263 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
2264 return "";
2265}
2266
2267bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
2268 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
2269}
2270
2271int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
2272 const MCSubtargetInfo &STI) {
2273 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
2274 if (isGFX11Plus(STI)) {
2275 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2276 if (Fmt == DfmtNfmt2UFmtGFX11[Id])
2277 return Id;
2278 }
2279 } else {
2280 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2281 if (Fmt == DfmtNfmt2UFmtGFX10[Id])
2282 return Id;
2283 }
2284 }
2285 return UFMT_UNDEF;
2286}
2287
2288bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
2289 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
2290}
2291
2292 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
2293 if (isGFX10Plus(STI))
2294 return UFMT_DEFAULT;
2295 return DFMT_NFMT_DEFAULT;
2296}
2297
2298} // namespace MTBUFFormat
2299
2300//===----------------------------------------------------------------------===//
2301// SendMsg
2302//===----------------------------------------------------------------------===//
2303
2304namespace SendMsg {
2305
2306 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
2307 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
2308 }
2309
2310bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
2311 return (MsgId & ~(getMsgIdMask(STI))) == 0;
2312}
2313
2314bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
2315 bool Strict) {
2316 assert(isValidMsgId(MsgId, STI));
2317
2318 if (!Strict)
2319 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
2320
2321 if (msgRequiresOp(MsgId, STI)) {
2322 if (MsgId == ID_GS_PreGFX11 && OpId == OP_GS_NOP)
2323 return false;
2324
2325 return !getMsgOpName(MsgId, OpId, STI).empty();
2326 }
2327
2328 return OpId == OP_NONE_;
2329}
2330
2331bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
2332 const MCSubtargetInfo &STI, bool Strict) {
2333 assert(isValidMsgOp(MsgId, OpId, STI, Strict));
2334
2335 if (!Strict)
2336 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
2337
2338 if (!isGFX11Plus(STI)) {
2339 switch (MsgId) {
2340 case ID_GS_PreGFX11:
2341 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
2342 case ID_GS_DONE_PreGFX11:
2343 return (OpId == OP_GS_NOP)
2344 ? (StreamId == STREAM_ID_NONE_)
2345 : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
2346 }
2347 }
2348 return StreamId == STREAM_ID_NONE_;
2349}
2350
2351bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
2352 return MsgId == ID_SYSMSG ||
2353 (!isGFX11Plus(STI) &&
2354 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
2355}
2356
2357bool msgSupportsStream(int64_t MsgId, int64_t OpId,
2358 const MCSubtargetInfo &STI) {
2359 return !isGFX11Plus(STI) &&
2360 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
2361 OpId != OP_GS_NOP;
2362}
2363
2364void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
2365 uint16_t &StreamId, const MCSubtargetInfo &STI) {
2366 MsgId = Val & getMsgIdMask(STI);
2367 if (isGFX11Plus(STI)) {
2368 OpId = 0;
2369 StreamId = 0;
2370 } else {
2371 OpId = (Val & OP_MASK_) >> OP_SHIFT_;
2372 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
2373 }
2374}
2375
2376 uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
2377 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
2378}
2379
2380} // namespace SendMsg
2381
2382//===----------------------------------------------------------------------===//
2383//
2384//===----------------------------------------------------------------------===//
2385
2386 unsigned getInitialPSInputAddr(const Function &F) {
2387 return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
2388}
2389
2390 bool getHasColorExport(const Function &F) {
2391 // As a safe default always respond as if PS has color exports.
2392 return F.getFnAttributeAsParsedInteger(
2393 "amdgpu-color-export",
2394 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
2395}
2396
2397 bool getHasDepthExport(const Function &F) {
2398 return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
2399}
2400
2401 unsigned getDynamicVGPRBlockSize(const Function &F) {
2402 unsigned BlockSize =
2403 F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
2404
2405 if (BlockSize == 16 || BlockSize == 32)
2406 return BlockSize;
2407
2408 return 0;
2409}
2410
2411bool hasXNACK(const MCSubtargetInfo &STI) {
2412 return STI.hasFeature(AMDGPU::FeatureXNACK);
2413}
2414
2415bool hasSRAMECC(const MCSubtargetInfo &STI) {
2416 return STI.hasFeature(AMDGPU::FeatureSRAMECC);
2417}
2418
2419 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
2420 return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
2421 !STI.hasFeature(AMDGPU::FeatureR128A16);
2422}
2423
2424bool hasA16(const MCSubtargetInfo &STI) {
2425 return STI.hasFeature(AMDGPU::FeatureA16);
2426}
2427
2428bool hasG16(const MCSubtargetInfo &STI) {
2429 return STI.hasFeature(AMDGPU::FeatureG16);
2430}
2431
2432 bool hasPackedD16(const MCSubtargetInfo &STI) {
2433 return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2434 !isSI(STI);
2435}
2436
2437bool hasGDS(const MCSubtargetInfo &STI) {
2438 return STI.hasFeature(AMDGPU::FeatureGDS);
2439}
2440
2441unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler) {
2442 auto Version = getIsaVersion(STI.getCPU());
2443 if (Version.Major == 10)
2444 return Version.Minor >= 3 ? 13 : 5;
2445 if (Version.Major == 11)
2446 return 5;
2447 if (Version.Major >= 12)
2448 return HasSampler ? 4 : 5;
2449 return 0;
2450}
2451
2453 if (isGFX1250(STI))
2454 return 32;
2455 return 16;
2456}
2457
2458bool isSI(const MCSubtargetInfo &STI) {
2459 return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2460}
2461
2462bool isCI(const MCSubtargetInfo &STI) {
2463 return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2464}
2465
2466bool isVI(const MCSubtargetInfo &STI) {
2467 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2468}
2469
2470bool isGFX9(const MCSubtargetInfo &STI) {
2471 return STI.hasFeature(AMDGPU::FeatureGFX9);
2472}
2473
2474 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2475 return isGFX9(STI) || isGFX10(STI);
2476}
2477
2478 bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI) {
2479 return isGFX9(STI) || isGFX10(STI) || isGFX11(STI);
2480}
2481
2482 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2483 return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2484}
2485
2486bool isGFX8Plus(const MCSubtargetInfo &STI) {
2487 return isVI(STI) || isGFX9Plus(STI);
2488}
2489
2490bool isGFX9Plus(const MCSubtargetInfo &STI) {
2491 return isGFX9(STI) || isGFX10Plus(STI);
2492}
2493
2494bool isNotGFX9Plus(const MCSubtargetInfo &STI) { return !isGFX9Plus(STI); }
2495
2496bool isGFX10(const MCSubtargetInfo &STI) {
2497 return STI.hasFeature(AMDGPU::FeatureGFX10);
2498}
2499
2500 bool isGFX10_GFX11(const MCSubtargetInfo &STI) {
2501 return isGFX10(STI) || isGFX11(STI);
2502}
2503
2504 bool isGFX10Plus(const MCSubtargetInfo &STI) {
2505 return isGFX10(STI) || isGFX11Plus(STI);
2506}
2507
2508bool isGFX11(const MCSubtargetInfo &STI) {
2509 return STI.hasFeature(AMDGPU::FeatureGFX11);
2510}
2511
2512 bool isGFX11Plus(const MCSubtargetInfo &STI) {
2513 return isGFX11(STI) || isGFX12Plus(STI);
2514}
2515
2516bool isGFX12(const MCSubtargetInfo &STI) {
2517 return STI.getFeatureBits()[AMDGPU::FeatureGFX12];
2518}
2519
2520bool isGFX12Plus(const MCSubtargetInfo &STI) { return isGFX12(STI); }
2521
2522bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
2523
2524bool isGFX1250(const MCSubtargetInfo &STI) {
2525 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts];
2526}
2527
2529 if (isGFX1250(STI))
2530 return false;
2531 return isGFX10Plus(STI);
2532}
2533
2534bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
2535
2536 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2537 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2538}
2539
2540 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2541 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2542}
2543
2544 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2545 return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2546}
2547
2548 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2549 return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2550}
2551
2552 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2553 return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2554}
2555
2556 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2557 return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2558}
2559
2560 bool isGFX10_3_GFX11(const MCSubtargetInfo &STI) {
2561 return isGFX10_BEncoding(STI) && !isGFX12Plus(STI);
2562}
2563
2564bool isGFX90A(const MCSubtargetInfo &STI) {
2565 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2566}
2567
2568bool isGFX940(const MCSubtargetInfo &STI) {
2569 return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2570}
2571
2572 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2573 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2574}
2575
2576 bool hasMAIInsts(const MCSubtargetInfo &STI) {
2577 return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2578}
2579
2580bool hasVOPD(const MCSubtargetInfo &STI) {
2581 return STI.hasFeature(AMDGPU::FeatureVOPD);
2582}
2583
2584 bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI) {
2585 return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);
2586}
2587
2588 bool hasKernargPreload(const MCSubtargetInfo &STI) {
2589 return STI.hasFeature(AMDGPU::FeatureKernargPreload);
2590}
2591
2592int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2593 int32_t ArgNumVGPR) {
2594 if (has90AInsts && ArgNumAGPR)
2595 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2596 return std::max(ArgNumVGPR, ArgNumAGPR);
2597}
2598
2599 bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
2600 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2601 const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2602 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2603 Reg == AMDGPU::SCC;
2604}
2605
2606 bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
2607 return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
2608}
2609
2610#define MAP_REG2REG \
2611 using namespace AMDGPU; \
2612 switch (Reg.id()) { \
2613 default: \
2614 return Reg; \
2615 CASE_CI_VI(FLAT_SCR) \
2616 CASE_CI_VI(FLAT_SCR_LO) \
2617 CASE_CI_VI(FLAT_SCR_HI) \
2618 CASE_VI_GFX9PLUS(TTMP0) \
2619 CASE_VI_GFX9PLUS(TTMP1) \
2620 CASE_VI_GFX9PLUS(TTMP2) \
2621 CASE_VI_GFX9PLUS(TTMP3) \
2622 CASE_VI_GFX9PLUS(TTMP4) \
2623 CASE_VI_GFX9PLUS(TTMP5) \
2624 CASE_VI_GFX9PLUS(TTMP6) \
2625 CASE_VI_GFX9PLUS(TTMP7) \
2626 CASE_VI_GFX9PLUS(TTMP8) \
2627 CASE_VI_GFX9PLUS(TTMP9) \
2628 CASE_VI_GFX9PLUS(TTMP10) \
2629 CASE_VI_GFX9PLUS(TTMP11) \
2630 CASE_VI_GFX9PLUS(TTMP12) \
2631 CASE_VI_GFX9PLUS(TTMP13) \
2632 CASE_VI_GFX9PLUS(TTMP14) \
2633 CASE_VI_GFX9PLUS(TTMP15) \
2634 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2635 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2636 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2637 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2638 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2639 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2640 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2641 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2642 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2643 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2644 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2645 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2646 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2647 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2648 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2649 CASE_VI_GFX9PLUS( \
2650 TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2651 CASE_GFXPRE11_GFX11PLUS(M0) \
2652 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2653 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2654 }
2655
2656#define CASE_CI_VI(node) \
2657 assert(!isSI(STI)); \
2658 case node: \
2659 return isCI(STI) ? node##_ci : node##_vi;
2660
2661#define CASE_VI_GFX9PLUS(node) \
2662 case node: \
2663 return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2664
2665#define CASE_GFXPRE11_GFX11PLUS(node) \
2666 case node: \
2667 return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2668
2669#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2670 case node: \
2671 return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2672
2673 MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
2674 if (STI.getTargetTriple().getArch() == Triple::r600)
2675 return Reg;
2677}
2678
2679#undef CASE_CI_VI
2680#undef CASE_VI_GFX9PLUS
2681#undef CASE_GFXPRE11_GFX11PLUS
2682#undef CASE_GFXPRE11_GFX11PLUS_TO
2683
2684#define CASE_CI_VI(node) \
2685 case node##_ci: \
2686 case node##_vi: \
2687 return node;
2688#define CASE_VI_GFX9PLUS(node) \
2689 case node##_vi: \
2690 case node##_gfx9plus: \
2691 return node;
2692#define CASE_GFXPRE11_GFX11PLUS(node) \
2693 case node##_gfx11plus: \
2694 case node##_gfxpre11: \
2695 return node;
2696#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2697
2698 MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
2699
2700bool isInlineValue(unsigned Reg) {
2701 switch (Reg) {
2702 case AMDGPU::SRC_SHARED_BASE_LO:
2703 case AMDGPU::SRC_SHARED_BASE:
2704 case AMDGPU::SRC_SHARED_LIMIT_LO:
2705 case AMDGPU::SRC_SHARED_LIMIT:
2706 case AMDGPU::SRC_PRIVATE_BASE_LO:
2707 case AMDGPU::SRC_PRIVATE_BASE:
2708 case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2709 case AMDGPU::SRC_PRIVATE_LIMIT:
2710 case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
2711 case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
2712 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2713 return true;
2714 case AMDGPU::SRC_VCCZ:
2715 case AMDGPU::SRC_EXECZ:
2716 case AMDGPU::SRC_SCC:
2717 return true;
2718 case AMDGPU::SGPR_NULL:
2719 return true;
2720 default:
2721 return false;
2722 }
2723}
2724
2725#undef CASE_CI_VI
2726#undef CASE_VI_GFX9PLUS
2727#undef CASE_GFXPRE11_GFX11PLUS
2728#undef CASE_GFXPRE11_GFX11PLUS_TO
2729#undef MAP_REG2REG
2730
2731bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2732 assert(OpNo < Desc.NumOperands);
2733 unsigned OpType = Desc.operands()[OpNo].OperandType;
2734 return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2735 OpType <= AMDGPU::OPERAND_KIMM_LAST;
2736}
2737
2738bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2739 assert(OpNo < Desc.NumOperands);
2740 unsigned OpType = Desc.operands()[OpNo].OperandType;
2741 switch (OpType) {
2754 return true;
2755 default:
2756 return false;
2757 }
2758}
2759
2760bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2761 assert(OpNo < Desc.NumOperands);
2762 unsigned OpType = Desc.operands()[OpNo].OperandType;
2763 return (OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2767}
2768
2769// Avoid using MCRegisterClass::getSize, since that function will go away
2770// (move from MC* level to Target* level). Return size in bits.
2771unsigned getRegBitWidth(unsigned RCID) {
2772 switch (RCID) {
2773 case AMDGPU::VGPR_16RegClassID:
2774 case AMDGPU::VGPR_16_Lo128RegClassID:
2775 case AMDGPU::SGPR_LO16RegClassID:
2776 case AMDGPU::AGPR_LO16RegClassID:
2777 return 16;
2778 case AMDGPU::SGPR_32RegClassID:
2779 case AMDGPU::VGPR_32RegClassID:
2780 case AMDGPU::VGPR_32_Lo256RegClassID:
2781 case AMDGPU::VRegOrLds_32RegClassID:
2782 case AMDGPU::AGPR_32RegClassID:
2783 case AMDGPU::VS_32RegClassID:
2784 case AMDGPU::AV_32RegClassID:
2785 case AMDGPU::SReg_32RegClassID:
2786 case AMDGPU::SReg_32_XM0RegClassID:
2787 case AMDGPU::SRegOrLds_32RegClassID:
2788 return 32;
2789 case AMDGPU::SGPR_64RegClassID:
2790 case AMDGPU::VS_64RegClassID:
2791 case AMDGPU::SReg_64RegClassID:
2792 case AMDGPU::VReg_64RegClassID:
2793 case AMDGPU::AReg_64RegClassID:
2794 case AMDGPU::SReg_64_XEXECRegClassID:
2795 case AMDGPU::VReg_64_Align2RegClassID:
2796 case AMDGPU::AReg_64_Align2RegClassID:
2797 case AMDGPU::AV_64RegClassID:
2798 case AMDGPU::AV_64_Align2RegClassID:
2799 case AMDGPU::VReg_64_Lo256_Align2RegClassID:
2800 case AMDGPU::VS_64_Lo256RegClassID:
2801 return 64;
2802 case AMDGPU::SGPR_96RegClassID:
2803 case AMDGPU::SReg_96RegClassID:
2804 case AMDGPU::VReg_96RegClassID:
2805 case AMDGPU::AReg_96RegClassID:
2806 case AMDGPU::VReg_96_Align2RegClassID:
2807 case AMDGPU::AReg_96_Align2RegClassID:
2808 case AMDGPU::AV_96RegClassID:
2809 case AMDGPU::AV_96_Align2RegClassID:
2810 case AMDGPU::VReg_96_Lo256_Align2RegClassID:
2811 return 96;
2812 case AMDGPU::SGPR_128RegClassID:
2813 case AMDGPU::SReg_128RegClassID:
2814 case AMDGPU::VReg_128RegClassID:
2815 case AMDGPU::AReg_128RegClassID:
2816 case AMDGPU::VReg_128_Align2RegClassID:
2817 case AMDGPU::AReg_128_Align2RegClassID:
2818 case AMDGPU::AV_128RegClassID:
2819 case AMDGPU::AV_128_Align2RegClassID:
2820 case AMDGPU::SReg_128_XNULLRegClassID:
2821 case AMDGPU::VReg_128_Lo256_Align2RegClassID:
2822 return 128;
2823 case AMDGPU::SGPR_160RegClassID:
2824 case AMDGPU::SReg_160RegClassID:
2825 case AMDGPU::VReg_160RegClassID:
2826 case AMDGPU::AReg_160RegClassID:
2827 case AMDGPU::VReg_160_Align2RegClassID:
2828 case AMDGPU::AReg_160_Align2RegClassID:
2829 case AMDGPU::AV_160RegClassID:
2830 case AMDGPU::AV_160_Align2RegClassID:
2831 case AMDGPU::VReg_160_Lo256_Align2RegClassID:
2832 return 160;
2833 case AMDGPU::SGPR_192RegClassID:
2834 case AMDGPU::SReg_192RegClassID:
2835 case AMDGPU::VReg_192RegClassID:
2836 case AMDGPU::AReg_192RegClassID:
2837 case AMDGPU::VReg_192_Align2RegClassID:
2838 case AMDGPU::AReg_192_Align2RegClassID:
2839 case AMDGPU::AV_192RegClassID:
2840 case AMDGPU::AV_192_Align2RegClassID:
2841 case AMDGPU::VReg_192_Lo256_Align2RegClassID:
2842 return 192;
2843 case AMDGPU::SGPR_224RegClassID:
2844 case AMDGPU::SReg_224RegClassID:
2845 case AMDGPU::VReg_224RegClassID:
2846 case AMDGPU::AReg_224RegClassID:
2847 case AMDGPU::VReg_224_Align2RegClassID:
2848 case AMDGPU::AReg_224_Align2RegClassID:
2849 case AMDGPU::AV_224RegClassID:
2850 case AMDGPU::AV_224_Align2RegClassID:
2851 case AMDGPU::VReg_224_Lo256_Align2RegClassID:
2852 return 224;
2853 case AMDGPU::SGPR_256RegClassID:
2854 case AMDGPU::SReg_256RegClassID:
2855 case AMDGPU::VReg_256RegClassID:
2856 case AMDGPU::AReg_256RegClassID:
2857 case AMDGPU::VReg_256_Align2RegClassID:
2858 case AMDGPU::AReg_256_Align2RegClassID:
2859 case AMDGPU::AV_256RegClassID:
2860 case AMDGPU::AV_256_Align2RegClassID:
2861 case AMDGPU::SReg_256_XNULLRegClassID:
2862 case AMDGPU::VReg_256_Lo256_Align2RegClassID:
2863 return 256;
2864 case AMDGPU::SGPR_288RegClassID:
2865 case AMDGPU::SReg_288RegClassID:
2866 case AMDGPU::VReg_288RegClassID:
2867 case AMDGPU::AReg_288RegClassID:
2868 case AMDGPU::VReg_288_Align2RegClassID:
2869 case AMDGPU::AReg_288_Align2RegClassID:
2870 case AMDGPU::AV_288RegClassID:
2871 case AMDGPU::AV_288_Align2RegClassID:
2872 case AMDGPU::VReg_288_Lo256_Align2RegClassID:
2873 return 288;
2874 case AMDGPU::SGPR_320RegClassID:
2875 case AMDGPU::SReg_320RegClassID:
2876 case AMDGPU::VReg_320RegClassID:
2877 case AMDGPU::AReg_320RegClassID:
2878 case AMDGPU::VReg_320_Align2RegClassID:
2879 case AMDGPU::AReg_320_Align2RegClassID:
2880 case AMDGPU::AV_320RegClassID:
2881 case AMDGPU::AV_320_Align2RegClassID:
2882 case AMDGPU::VReg_320_Lo256_Align2RegClassID:
2883 return 320;
2884 case AMDGPU::SGPR_352RegClassID:
2885 case AMDGPU::SReg_352RegClassID:
2886 case AMDGPU::VReg_352RegClassID:
2887 case AMDGPU::AReg_352RegClassID:
2888 case AMDGPU::VReg_352_Align2RegClassID:
2889 case AMDGPU::AReg_352_Align2RegClassID:
2890 case AMDGPU::AV_352RegClassID:
2891 case AMDGPU::AV_352_Align2RegClassID:
2892 case AMDGPU::VReg_352_Lo256_Align2RegClassID:
2893 return 352;
2894 case AMDGPU::SGPR_384RegClassID:
2895 case AMDGPU::SReg_384RegClassID:
2896 case AMDGPU::VReg_384RegClassID:
2897 case AMDGPU::AReg_384RegClassID:
2898 case AMDGPU::VReg_384_Align2RegClassID:
2899 case AMDGPU::AReg_384_Align2RegClassID:
2900 case AMDGPU::AV_384RegClassID:
2901 case AMDGPU::AV_384_Align2RegClassID:
2902 case AMDGPU::VReg_384_Lo256_Align2RegClassID:
2903 return 384;
2904 case AMDGPU::SGPR_512RegClassID:
2905 case AMDGPU::SReg_512RegClassID:
2906 case AMDGPU::VReg_512RegClassID:
2907 case AMDGPU::AReg_512RegClassID:
2908 case AMDGPU::VReg_512_Align2RegClassID:
2909 case AMDGPU::AReg_512_Align2RegClassID:
2910 case AMDGPU::AV_512RegClassID:
2911 case AMDGPU::AV_512_Align2RegClassID:
2912 case AMDGPU::VReg_512_Lo256_Align2RegClassID:
2913 return 512;
2914 case AMDGPU::SGPR_1024RegClassID:
2915 case AMDGPU::SReg_1024RegClassID:
2916 case AMDGPU::VReg_1024RegClassID:
2917 case AMDGPU::AReg_1024RegClassID:
2918 case AMDGPU::VReg_1024_Align2RegClassID:
2919 case AMDGPU::AReg_1024_Align2RegClassID:
2920 case AMDGPU::AV_1024RegClassID:
2921 case AMDGPU::AV_1024_Align2RegClassID:
2922 case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
2923 return 1024;
2924 default:
2925 llvm_unreachable("Unexpected register class");
2926 }
2927}
2928
2929unsigned getRegBitWidth(const MCRegisterClass &RC) {
2930 return getRegBitWidth(RC.getID());
2931}
2932
2933bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2934 if (isInlinableIntLiteral(Literal))
2935 return true;
2936
2937 uint64_t Val = static_cast<uint64_t>(Literal);
2938 return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
2939 (Val == llvm::bit_cast<uint64_t>(1.0)) ||
2940 (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
2941 (Val == llvm::bit_cast<uint64_t>(0.5)) ||
2942 (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
2943 (Val == llvm::bit_cast<uint64_t>(2.0)) ||
2944 (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
2945 (Val == llvm::bit_cast<uint64_t>(4.0)) ||
2946 (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
2947 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2948}
2949
2950bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2951 if (isInlinableIntLiteral(Literal))
2952 return true;
2953
2954 // The actual type of the operand does not seem to matter as long
2955 // as the bits match one of the inline immediate values. For example:
2956 //
2957 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2958 // so it is a legal inline immediate.
2959 //
2960 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2961 // floating-point, so it is a legal inline immediate.
2962
2963 uint32_t Val = static_cast<uint32_t>(Literal);
2964 return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
2965 (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
2966 (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
2967 (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
2968 (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
2969 (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
2970 (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
2971 (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
2972 (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
2973 (Val == 0x3e22f983 && HasInv2Pi);
2974}
2975
2976bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi) {
2977 if (!HasInv2Pi)
2978 return false;
2979 if (isInlinableIntLiteral(Literal))
2980 return true;
2981 uint16_t Val = static_cast<uint16_t>(Literal);
2982 return Val == 0x3F00 || // 0.5
2983 Val == 0xBF00 || // -0.5
2984 Val == 0x3F80 || // 1.0
2985 Val == 0xBF80 || // -1.0
2986 Val == 0x4000 || // 2.0
2987 Val == 0xC000 || // -2.0
2988 Val == 0x4080 || // 4.0
2989 Val == 0xC080 || // -4.0
2990 Val == 0x3E22; // 1.0 / (2.0 * pi)
2991}
2992
2993bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi) {
2994 return isInlinableLiteral32(Literal, HasInv2Pi);
2995}
2996
2997bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi) {
2998 if (!HasInv2Pi)
2999 return false;
3000 if (isInlinableIntLiteral(Literal))
3001 return true;
3002 uint16_t Val = static_cast<uint16_t>(Literal);
3003 return Val == 0x3C00 || // 1.0
3004 Val == 0xBC00 || // -1.0
3005 Val == 0x3800 || // 0.5
3006 Val == 0xB800 || // -0.5
3007 Val == 0x4000 || // 2.0
3008 Val == 0xC000 || // -2.0
3009 Val == 0x4400 || // 4.0
3010 Val == 0xC400 || // -4.0
3011 Val == 0x3118; // 1/2pi
3012}
3013
3014std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
3015 // Unfortunately, the Instruction Set Architecture Reference Guide is
3016 // misleading about how the inline operands work for (packed) 16-bit
3017 // instructions. In a nutshell, the actual HW behavior is:
3018 //
3019 // - integer encodings (-16 .. 64) are always produced as sign-extended
3020 // 32-bit values
3021 // - float encodings are produced as:
3022 // - for F16 instructions: corresponding half-precision float values in
3023 // the LSBs, 0 in the MSBs
3024 // - for UI16 instructions: corresponding single-precision float value
3025 int32_t Signed = static_cast<int32_t>(Literal);
3026 if (Signed >= 0 && Signed <= 64)
3027 return 128 + Signed;
3028
3029 if (Signed >= -16 && Signed <= -1)
3030 return 192 + std::abs(Signed);
3031
3032 if (IsFloat) {
3033 // clang-format off
3034 switch (Literal) {
3035 case 0x3800: return 240; // 0.5
3036 case 0xB800: return 241; // -0.5
3037 case 0x3C00: return 242; // 1.0
3038 case 0xBC00: return 243; // -1.0
3039 case 0x4000: return 244; // 2.0
3040 case 0xC000: return 245; // -2.0
3041 case 0x4400: return 246; // 4.0
3042 case 0xC400: return 247; // -4.0
3043 case 0x3118: return 248; // 1.0 / (2.0 * pi)
3044 default: break;
3045 }
3046 // clang-format on
3047 } else {
3048 // clang-format off
3049 switch (Literal) {
3050 case 0x3F000000: return 240; // 0.5
3051 case 0xBF000000: return 241; // -0.5
3052 case 0x3F800000: return 242; // 1.0
3053 case 0xBF800000: return 243; // -1.0
3054 case 0x40000000: return 244; // 2.0
3055 case 0xC0000000: return 245; // -2.0
3056 case 0x40800000: return 246; // 4.0
3057 case 0xC0800000: return 247; // -4.0
3058 case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
3059 default: break;
3060 }
3061 // clang-format on
3062 }
3063
3064 return {};
3065}
3066
3067// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
3068// or nullopt.
3069std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
3070 return getInlineEncodingV216(false, Literal);
3071}
3072
3073// Encoding of the literal as an inline constant for a V_PK_*_BF16 instruction
3074// or nullopt.
3075std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal) {
3076 int32_t Signed = static_cast<int32_t>(Literal);
3077 if (Signed >= 0 && Signed <= 64)
3078 return 128 + Signed;
3079
3080 if (Signed >= -16 && Signed <= -1)
3081 return 192 + std::abs(Signed);
3082
3083 // clang-format off
3084 switch (Literal) {
3085 case 0x3F00: return 240; // 0.5
3086 case 0xBF00: return 241; // -0.5
3087 case 0x3F80: return 242; // 1.0
3088 case 0xBF80: return 243; // -1.0
3089 case 0x4000: return 244; // 2.0
3090 case 0xC000: return 245; // -2.0
3091 case 0x4080: return 246; // 4.0
3092 case 0xC080: return 247; // -4.0
3093 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
3094 default: break;
3095 }
3096 // clang-format on
3097
3098 return std::nullopt;
3099}
3100
3101// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
3102// or nullopt.
3103std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
3104 return getInlineEncodingV216(true, Literal);
3105}
3106
3107// Whether the given literal can be inlined for a V_PK_* instruction.
3108 bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
3109 switch (OpType) {
3112 return getInlineEncodingV216(false, Literal).has_value();
3115 return getInlineEncodingV216(true, Literal).has_value();
3120 return false;
3121 default:
3122 llvm_unreachable("bad packed operand type");
3123 }
3124}
3125
3126// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
3127 bool isInlinableLiteralV2I16(uint32_t Literal) {
3128 return getInlineEncodingV2I16(Literal).has_value();
3129 }
3130
3131// Whether the given literal can be inlined for a V_PK_*_BF16 instruction.
3132 bool isInlinableLiteralV2BF16(uint32_t Literal) {
3133 return getInlineEncodingV2BF16(Literal).has_value();
3134 }
3135
3136// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
3137 bool isInlinableLiteralV2F16(uint32_t Literal) {
3138 return getInlineEncodingV2F16(Literal).has_value();
3139 }
3140
3141bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {
3142 if (IsFP64)
3143 return !Lo_32(Val);
3144
3145 return isUInt<32>(Val) || isInt<32>(Val);
3146}
3147
3149 switch (Type) {
3150 default:
3151 break;
3156 return Imm & 0xffff;
3169 return Lo_32(Imm);
3171 return Hi_32(Imm);
3172 }
3173 return Imm;
3174}
3175
3176 bool isArgPassedInSGPR(const Argument *A) {
3177 const Function *F = A->getParent();
3178
3179 // Arguments to compute shaders are never a source of divergence.
3180 CallingConv::ID CC = F->getCallingConv();
3181 switch (CC) {
3184 return true;
3195 // For non-compute shaders, SGPR inputs are marked with either inreg or
3196 // byval. Everything else is in VGPRs.
3197 return A->hasAttribute(Attribute::InReg) ||
3198 A->hasAttribute(Attribute::ByVal);
3199 default:
3200 // TODO: treat i1 as divergent?
3201 return A->hasAttribute(Attribute::InReg);
3202 }
3203}
3204
3205bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
3206 // Arguments to compute shaders are never a source of divergence.
3207 CallingConv::ID CC = CB->getCallingConv();
3208 switch (CC) {
3211 return true;
3222 // For non-compute shaders, SGPR inputs are marked with either inreg or
3223 // byval. Everything else is in VGPRs.
3224 return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
3225 CB->paramHasAttr(ArgNo, Attribute::ByVal);
3226 default:
3227 return CB->paramHasAttr(ArgNo, Attribute::InReg);
3228 }
3229}
3230
3231static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
3232 return isGCN3Encoding(ST) || isGFX10Plus(ST);
3233}
3234
3235 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
3236 int64_t EncodedOffset) {
3237 if (isGFX12Plus(ST))
3238 return isUInt<23>(EncodedOffset);
3239
3240 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
3241 : isUInt<8>(EncodedOffset);
3242}
3243
3244 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
3245 int64_t EncodedOffset, bool IsBuffer) {
3246 if (isGFX12Plus(ST)) {
3247 if (IsBuffer && EncodedOffset < 0)
3248 return false;
3249 return isInt<24>(EncodedOffset);
3250 }
3251
3252 return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
3253}
3254
3255static bool isDwordAligned(uint64_t ByteOffset) {
3256 return (ByteOffset & 3) == 0;
3257}
3258
3259 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
3260 uint64_t ByteOffset) {
3261 if (hasSMEMByteOffset(ST))
3262 return ByteOffset;
3263
3264 assert(isDwordAligned(ByteOffset));
3265 return ByteOffset >> 2;
3266}
3267
3268std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
3269 int64_t ByteOffset, bool IsBuffer,
3270 bool HasSOffset) {
3271 // For unbuffered smem loads, it is illegal for the Immediate Offset to be
3272 // negative if the resulting (Offset + (M0 or SOffset or zero)) is negative.
3273 // Handle case where SOffset is not present.
3274 if (!IsBuffer && !HasSOffset && ByteOffset < 0 && hasSMRDSignedImmOffset(ST))
3275 return std::nullopt;
3276
3277 if (isGFX12Plus(ST)) // 24 bit signed offsets
3278 return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3279 : std::nullopt;
3280
3281 // The signed version is always a byte offset.
3282 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
3283 assert(hasSMEMByteOffset(ST));
3284 return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3285 : std::nullopt;
3286 }
3287
3288 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
3289 return std::nullopt;
3290
3291 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3292 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
3293 ? std::optional<int64_t>(EncodedOffset)
3294 : std::nullopt;
3295}
3296
3297std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
3298 int64_t ByteOffset) {
3299 if (!isCI(ST) || !isDwordAligned(ByteOffset))
3300 return std::nullopt;
3301
3302 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3303 return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
3304 : std::nullopt;
3305}
3306
3307 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
3308 if (AMDGPU::isGFX10(ST))
3309 return 12;
3310
3311 if (AMDGPU::isGFX12(ST))
3312 return 24;
3313 return 13;
3314}
3315
3316namespace {
3317
3318struct SourceOfDivergence {
3319 unsigned Intr;
3320};
3321const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
3322
3323struct AlwaysUniform {
3324 unsigned Intr;
3325};
3326const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
3327
3328#define GET_SourcesOfDivergence_IMPL
3329#define GET_UniformIntrinsics_IMPL
3330#define GET_Gfx9BufferFormat_IMPL
3331#define GET_Gfx10BufferFormat_IMPL
3332#define GET_Gfx11PlusBufferFormat_IMPL
3333
3334#include "AMDGPUGenSearchableTables.inc"
3335
3336} // end anonymous namespace
3337
3338bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
3339 return lookupSourceOfDivergence(IntrID);
3340}
3341
3342bool isIntrinsicAlwaysUniform(unsigned IntrID) {
3343 return lookupAlwaysUniform(IntrID);
3344}
3345
3346 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
3347 uint8_t NumComponents,
3348 uint8_t NumFormat,
3349 const MCSubtargetInfo &STI) {
3350 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
3351 BitsPerComp, NumComponents, NumFormat)
3352 : isGFX10(STI)
3353 ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
3354 : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
3355}
3356
3357 const GcnBufferFormatInfo *getGcnBufferFormatInfo(unsigned Format,
3358 const MCSubtargetInfo &STI) {
3359 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
3360 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
3361 : getGfx9BufferFormatInfo(Format);
3362}
3363
3365 const MCRegisterInfo &MRI) {
3366 const unsigned VGPRClasses[] = {
3367 AMDGPU::VGPR_16RegClassID, AMDGPU::VGPR_32RegClassID,
3368 AMDGPU::VReg_64RegClassID, AMDGPU::VReg_96RegClassID,
3369 AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
3370 AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
3371 AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
3372 AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
3373 AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
3374 AMDGPU::VReg_1024RegClassID};
3375
3376 for (unsigned RCID : VGPRClasses) {
3377 const MCRegisterClass &RC = MRI.getRegClass(RCID);
3378 if (RC.contains(Reg))
3379 return &RC;
3380 }
3381
3382 return nullptr;
3383}
3384
3386 unsigned Enc = MRI.getEncodingValue(Reg);
3387 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3388 return Idx >> 8;
3389}
3390
3392 const MCRegisterInfo &MRI) {
3393 unsigned Enc = MRI.getEncodingValue(Reg);
3394 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3395 if (Idx >= 0x100)
3396 return AMDGPU::NoRegister;
3397
3399 if (!RC)
3400 return AMDGPU::NoRegister;
3401
3402 Idx |= MSBs << 8;
3403 if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
3404 // This class has 2048 registers with interleaved lo16 and hi16.
3405 Idx *= 2;
3407 ++Idx;
3408 }
3409
3410 return RC->getRegister(Idx);
3411}
3412
3413std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
3415 static const AMDGPU::OpName VOPOps[4] = {
3416 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
3417 AMDGPU::OpName::vdst};
3418 static const AMDGPU::OpName VDSOps[4] = {
3419 AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
3420 AMDGPU::OpName::vdst};
3421 static const AMDGPU::OpName FLATOps[4] = {
3422 AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
3423 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
3424 static const AMDGPU::OpName BUFOps[4] = {
3425 AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
3426 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
3427 static const AMDGPU::OpName VIMGOps[4] = {
3428 AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
3429 AMDGPU::OpName::vdata};
3430
3431 // For VOPD instructions, the MSB of a Y-component operand's VGPR address is
3432 // supposed to match the corresponding X operand; otherwise the instructions
3433 // shall not be combined into a VOPD.
3434 static const AMDGPU::OpName VOPDOpsX[4] = {
3435 AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
3436 AMDGPU::OpName::vdstX};
3437 static const AMDGPU::OpName VOPDOpsY[4] = {
3438 AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
3439 AMDGPU::OpName::vdstY};
3440
3441 unsigned TSFlags = Desc.TSFlags;
3442
3443 if (TSFlags &
3446 // LD_SCALE operands ignore MSB.
3447 if (Desc.getOpcode() == AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32 ||
3448 Desc.getOpcode() == AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250 ||
3449 Desc.getOpcode() == AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64 ||
3450 Desc.getOpcode() == AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250)
3451 return {};
3452 return {VOPOps, nullptr};
3453 }
3454
3455 if (TSFlags & SIInstrFlags::DS)
3456 return {VDSOps, nullptr};
3457
3458 if (TSFlags & SIInstrFlags::FLAT)
3459 return {FLATOps, nullptr};
3460
3461 if (TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))
3462 return {BUFOps, nullptr};
3463
3464 if (TSFlags & SIInstrFlags::VIMAGE)
3465 return {VIMGOps, nullptr};
3466
3467 if (AMDGPU::isVOPD(Desc.getOpcode()))
3468 return {VOPDOpsX, VOPDOpsY};
3469
3470 assert(!(TSFlags & SIInstrFlags::MIMG));
3471
3472 if (TSFlags & (SIInstrFlags::VSAMPLE | SIInstrFlags::EXP))
3473 llvm_unreachable("Sample and export VGPR lowering is not implemented and"
3474 " these instructions are not expected on gfx1250");
3475
3476 return {};
3477}
3478
3479bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) {
3480 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3481
3482 if (TSFlags & SIInstrFlags::SMRD)
3483 return !getSMEMIsBuffer(Opcode);
3484 if (!(TSFlags & SIInstrFlags::FLAT))
3485 return false;
3486
3487 // Only SV and SVS modes are supported.
3488 if (TSFlags & SIInstrFlags::FlatScratch)
3489 return hasNamedOperand(Opcode, OpName::vaddr);
3490
3491 // Only GVS mode is supported.
3492 return hasNamedOperand(Opcode, OpName::vaddr) &&
3493 hasNamedOperand(Opcode, OpName::saddr);
3494
3495 return false;
3496}
3497
3498bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3499 const MCSubtargetInfo &ST) {
3500 for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
3501 int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
3502 if (Idx == -1)
3503 continue;
3504
3505 const MCOperandInfo &OpInfo = OpDesc.operands()[Idx];
3506 int16_t RegClass = MII.getOpRegClassID(
3507 OpInfo, ST.getHwMode(MCSubtargetInfo::HwMode_RegInfo));
3508 if (RegClass == AMDGPU::VReg_64RegClassID ||
3509 RegClass == AMDGPU::VReg_64_Align2RegClassID)
3510 return true;
3511 }
3512
3513 return false;
3514}
3515
3516bool isDPALU_DPP32BitOpc(unsigned Opc) {
3517 switch (Opc) {
3518 case AMDGPU::V_MUL_LO_U32_e64:
3519 case AMDGPU::V_MUL_LO_U32_e64_dpp:
3520 case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
3521 case AMDGPU::V_MUL_HI_U32_e64:
3522 case AMDGPU::V_MUL_HI_U32_e64_dpp:
3523 case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
3524 case AMDGPU::V_MUL_HI_I32_e64:
3525 case AMDGPU::V_MUL_HI_I32_e64_dpp:
3526 case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
3527 case AMDGPU::V_MAD_U32_e64:
3528 case AMDGPU::V_MAD_U32_e64_dpp:
3529 case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:
3530 return true;
3531 default:
3532 return false;
3533 }
3534}
3535
3536bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3537 const MCSubtargetInfo &ST) {
3538 if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
3539 return false;
3540
3541 if (isDPALU_DPP32BitOpc(OpDesc.getOpcode()))
3542 return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);
3543
3544 return hasAny64BitVGPROperands(OpDesc, MII, ST);
3545}
3546
3548 return ST.hasFeature(AMDGPU::FeatureAddressableLocalMemorySize327680) ? 256
3549 : 128;
3550}
3551
3552bool isPackedFP32Inst(unsigned Opc) {
3553 switch (Opc) {
3554 case AMDGPU::V_PK_ADD_F32:
3555 case AMDGPU::V_PK_ADD_F32_gfx12:
3556 case AMDGPU::V_PK_MUL_F32:
3557 case AMDGPU::V_PK_MUL_F32_gfx12:
3558 case AMDGPU::V_PK_FMA_F32:
3559 case AMDGPU::V_PK_FMA_F32_gfx12:
3560 return true;
3561 default:
3562 return false;
3563 }
3564}
3565
3566const std::array<unsigned, 3> &ClusterDimsAttr::getDims() const {
3567 assert(isFixedDims() && "expect kind to be FixedDims");
3568 return Dims;
3569}
3570
3571std::string ClusterDimsAttr::to_string() const {
3572 SmallString<10> Buffer;
3573 raw_svector_ostream OS(Buffer);
3574
3575 switch (getKind()) {
3576 case Kind::Unknown:
3577 return "";
3578 case Kind::NoCluster: {
3579 OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
3580 return Buffer.c_str();
3581 }
3582 case Kind::VariableDims: {
3583 OS << EncoVariableDims << ',' << EncoVariableDims << ','
3584 << EncoVariableDims;
3585 return Buffer.c_str();
3586 }
3587 case Kind::FixedDims: {
3588 OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
3589 return Buffer.c_str();
3590 }
3591 }
3592 llvm_unreachable("Unknown ClusterDimsAttr kind");
3593}
3594
3595 ClusterDimsAttr ClusterDimsAttr::get(const Function &F) {
3596 std::optional<SmallVector<unsigned>> Attr =
3597 getIntegerVecAttribute(F, "amdgpu-cluster-dims", /*Size=*/3);
3598 Kind AttrKind = Kind::FixedDims;
3599
3600 if (!Attr.has_value())
3601 AttrKind = Kind::Unknown;
3602 else if (all_of(*Attr, [](unsigned V) { return V == EncoNoCluster; }))
3603 AttrKind = Kind::NoCluster;
3604 else if (all_of(*Attr, [](unsigned V) { return V == EncoVariableDims; }))
3605 AttrKind = Kind::VariableDims;
3606
3607 ClusterDimsAttr A(AttrKind);
3608 if (AttrKind == Kind::FixedDims)
3609 A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};
3610
3611 return A;
3612}
3613
3614} // namespace AMDGPU
3615
3616 raw_ostream &operator<<(raw_ostream &OS,
3617 const AMDGPU::IsaInfo::TargetIDSetting S) {
3618 switch (S) {
3619 case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
3620 OS << "Unsupported";
3621 break;
3622 case AMDGPU::IsaInfo::TargetIDSetting::Any:
3623 OS << "Any";
3624 break;
3625 case AMDGPU::IsaInfo::TargetIDSetting::Off:
3626 OS << "Off";
3627 break;
3628 case AMDGPU::IsaInfo::TargetIDSetting::On:
3629 OS << "On";
3630 break;
3631 }
3632 return OS;
3633}
3634
3635} // namespace llvm
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
#define MAP_REG2REG
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define RegName(no)
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Register Reg
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define T
uint64_t High
if(PassOpts->AAPipeline)
#define S_00B848_MEM_ORDERED(x)
Definition SIDefines.h:1233
#define S_00B848_WGP_MODE(x)
Definition SIDefines.h:1230
#define S_00B848_FWD_PROGRESS(x)
Definition SIDefines.h:1236
unsigned unsigned DefaultVal
This file contains some functions that are useful when dealing with strings.
static const int BlockSize
Definition TarWriter.cpp:33
static ClusterDimsAttr get(const Function &F)
const std::array< unsigned, 3 > & getDims() const
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
void setTargetIDFromTargetIDStream(StringRef TargetID)
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< unsigned(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< unsigned, Component::MAX_OPR_NUM > RegIndices
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
Definition MCInstrInfo.h:80
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:87
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
StringRef getCPU() const
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
const char * c_str()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
Definition StringRef.h:854
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:420
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:411
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition Triple.h:904
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned decodeFieldHoldCnt(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule for ArchVGPRs.
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
StringLiteral const UfmtSymbolicGFX11[]
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX10[]
StringLiteral const DfmtSymbolic[]
static StringLiteral const * getNfmtLookupTable(const MCSubtargetInfo &STI)
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringLiteral const NfmtSymbolicGFX10[]
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX11[]
StringLiteral const NfmtSymbolicVI[]
StringLiteral const NfmtSymbolicSICI[]
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
int64_t getDfmt(const StringRef Name)
StringLiteral const UfmtSymbolicGFX10[]
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
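As a rough usage sketch (not taken from this file), encodeDfmtNfmt and decodeDfmtNfmt round-trip the packed TBUFFER format; the input pair should first be checked against the subtarget with isValidDfmtNfmt:
#include "AMDGPUBaseInfo.h"

void roundTripFormat(unsigned Dfmt, unsigned Nfmt) {
  using namespace llvm::AMDGPU::MTBUFFormat;
  // Pack the two fields into the combined format immediate...
  int64_t Packed = encodeDfmtNfmt(Dfmt, Nfmt);
  // ...and split them back out again.
  unsigned OutDfmt = 0, OutNfmt = 0;
  decodeDfmtNfmt(static_cast<unsigned>(Packed), OutDfmt, OutNfmt);
  // OutDfmt == Dfmt and OutNfmt == Nfmt for in-range inputs.
}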
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
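A minimal sketch of the packing direction, assuming the caller has already validated the IDs with isValidMsgId/isValidMsgOp/isValidMsgStream; decodeMsg reverses it but additionally needs the MCSubtargetInfo because the simm16 layout differs between generations:
#include "AMDGPUBaseInfo.h"

uint64_t packSendMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
  // IDs are assumed pre-validated for the target; no checking is done here.
  return llvm::AMDGPU::SendMsg::encodeMsg(MsgId, OpId, StreamId);
}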
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
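A minimal sketch of using this decoder on a pre-gfx12 target; the name "gfx900" is only an example, and the encoded immediate is whatever came from the s_waitcnt operand:
#include "AMDGPUBaseInfo.h"
#include "llvm/Support/raw_ostream.h"

void printWaitcnt(unsigned Encoded) {
  using namespace llvm::AMDGPU;
  IsaVersion Version = getIsaVersion("gfx900"); // example target only
  unsigned Vmcnt = 0, Expcnt = 0, Lgkmcnt = 0;
  decodeWaitcnt(Version, Encoded, Vmcnt, Expcnt, Lgkmcnt);
  llvm::errs() << "vmcnt=" << Vmcnt << " expcnt=" << Expcnt
               << " lgkmcnt=" << Lgkmcnt << "\n";
}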
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Return true if Reg is a scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo register, return the correct hardware register for the given STI; otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
const MCRegisterClass * getVGPRPhysRegClass(MCPhysReg Reg, const MCRegisterInfo &MRI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
unsigned getVGPREncodingMSBs(MCPhysReg Reg, const MCRegisterInfo &MRI)
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
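The reverse direction, sketched under the common idiom of leaving a counter at its maximum encodable value to mean "do not wait on it":
#include "AMDGPUBaseInfo.h"

unsigned waitOnVmemOnly(const llvm::AMDGPU::IsaVersion &Version) {
  using namespace llvm::AMDGPU;
  unsigned Vmcnt = 0;                            // wait for vector memory ops
  unsigned Expcnt = getExpcntBitMask(Version);   // max value: ignore expcnt
  unsigned Lgkmcnt = getLgkmcntBitMask(Version); // max value: ignore lgkmcnt
  return encodeWaitcnt(Version, Vmcnt, Expcnt, Lgkmcnt);
}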
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid charcode or 0 in the first entry if this is a valid physical register name.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the inline values intended for floating-point operands.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isInlineValue(unsigned Reg)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid charcode or 0 in the first entry if this is a valid physical register constraint.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
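A sketch of the typical query: ask whether a byte offset can be encoded in the SMEM immediate field at all, treating std::nullopt as "must materialize the offset another way":
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <optional>

bool canFoldSMRDOffset(const llvm::MCSubtargetInfo &ST, int64_t ByteOffset) {
  std::optional<int64_t> Encoded = llvm::AMDGPU::getSMRDEncodedOffset(
      ST, ByteOffset, /*IsBuffer=*/false, /*HasSOffset=*/false);
  return Encoded.has_value();
}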
bool isGlobalSegment(const GlobalValue *GV)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:231
@ OPERAND_REG_INLINE_C_LAST
Definition SIDefines.h:254
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:209
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:222
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:219
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:224
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:210
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:206
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:201
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:208
@ OPERAND_REG_INLINE_AC_FIRST
Definition SIDefines.h:256
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:207
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:211
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:205
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:225
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:236
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:237
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:212
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:204
@ OPERAND_REG_INLINE_C_FIRST
Definition SIDefines.h:253
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_AC_LAST
Definition SIDefines.h:257
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:217
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:223
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:213
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:238
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:220
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:228
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
MCPhysReg getVGPRWithMSBs(MCPhysReg Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR, return a corresponding high VGPR with MSBs set.
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isMAC(unsigned Opc)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
const int OPR_ID_UNKNOWN
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
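A sketch of the usual call pattern; the attribute name and fallback bounds below are illustrative examples, not values mandated by this helper:
#include "AMDGPUBaseInfo.h"
#include "llvm/IR/Function.h"
#include <utility>

std::pair<unsigned, unsigned> flatWorkGroupBounds(const llvm::Function &F) {
  // Parses an attribute of the form "min,max"; the Default pair is used when
  // the attribute is absent.
  return llvm::AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-flat-work-group-size", /*Default=*/{1u, 1024u},
      /*OnlyFirstRequired=*/false);
}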
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
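A small sketch of testing a 64-bit floating-point immediate against the inline-constant encodings; it assumes Val holds a double-width value, and HasInv2Pi reflects whether the target supports the 1/(2*pi) inline value (roughly, GFX8 and later):
#include "AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"

bool fitsAsInlineConstant64(const llvm::APFloat &Val, bool HasInv2Pi) {
  // Reinterpret the double's bits as a signed 64-bit integer, which is the
  // form isInlinableLiteral64 expects.
  int64_t Bits = Val.bitcastToAPInt().getSExtValue();
  return llvm::AMDGPU::isInlinableLiteral64(Bits, HasInv2Pi);
}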
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tessellation and geometry are not in use, or otherwise copy shader if one is needed).
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition ELF.h:384
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition ELF.h:385
@ ELFABIVERSION_AMDGPU_HSA_V6
Definition ELF.h:386
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is congruent to Skew modulo Align.
Definition MathExtras.h:557
std::string utostr(uint64_t X, bool isNeg=false)
Op::Description Desc
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition MathExtras.h:159
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition MathExtras.h:164
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:405
To bit_cast(const From &from) noexcept
Definition bit.h:90
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
#define N
AMD Kernel Code Object (amd_kernel_code_t).
Instruction set architecture version.
Represents the counter values to wait for in an s_waitcnt instruction.