AMDGPUDisassembler.cpp
1//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9//===----------------------------------------------------------------------===//
10//
11/// \file
12///
13/// This file contains the definition of the AMDGPU ISA disassembler.
14//
15//===----------------------------------------------------------------------===//
16
17// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18
21#include "SIDefines.h"
22#include "SIRegisterInfo.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCDecoder.h"
32#include "llvm/MC/MCExpr.h"
33#include "llvm/MC/MCInstrDesc.h"
39
40using namespace llvm;
41using namespace llvm::MCD;
42
43#define DEBUG_TYPE "amdgpu-disassembler"
44
45#define SGPR_MAX \
46 (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
47 : AMDGPU::EncValues::SGPR_MAX_SI)
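// GFX10+ widened the scalar source encoding range: encodings 102-105, which
// earlier targets reserve for FLAT_SCRATCH and XNACK_MASK, became ordinary
// SGPRs, so the highest valid SGPR encoding depends on the subtarget.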
48
50
51static int64_t getInlineImmValF16(unsigned Imm);
52static int64_t getInlineImmValBF16(unsigned Imm);
53static int64_t getInlineImmVal32(unsigned Imm);
54static int64_t getInlineImmVal64(unsigned Imm);
55
56AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
57 MCContext &Ctx, MCInstrInfo const *MCII)
58 : MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
59 MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
60 CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
61 // ToDo: AMDGPUDisassembler supports only VI ISA.
62 if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
63 reportFatalUsageError("disassembly not yet supported for subtarget");
64
65 for (auto [Symbol, Code] : AMDGPU::UCVersion::getGFXVersions())
66 createConstantSymbolExpr(Symbol, Code);
67
68 UCVersionW64Expr = createConstantSymbolExpr("UC_VERSION_W64_BIT", 0x2000);
69 UCVersionW32Expr = createConstantSymbolExpr("UC_VERSION_W32_BIT", 0x4000);
70 UCVersionMDPExpr = createConstantSymbolExpr("UC_VERSION_MDP_BIT", 0x8000);
71}
72
73void AMDGPUDisassembler::setABIVersion(unsigned Version) {
74 CodeObjectVersion = AMDGPU::getAMDHSACodeObjectVersion(Version);
75}
76
77static DecodeStatus
78addOperand(MCInst &Inst, const MCOperand& Opnd) {
79 Inst.addOperand(Opnd);
80 return Opnd.isValid() ?
81 MCDisassembler::Success :
82 MCDisassembler::Fail;
83}
84
85static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
86 AMDGPU::OpName Name) {
87 int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), Name);
88 if (OpIdx != -1) {
89 auto *I = MI.begin();
90 std::advance(I, OpIdx);
91 MI.insert(I, Op);
92 }
93 return OpIdx;
94}
95
96static DecodeStatus decodeSOPPBrTarget(MCInst &Inst, unsigned Imm,
97 uint64_t Addr,
98 const MCDisassembler *Decoder) {
99 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
100
101 // Our branches take a simm16.
102 int64_t Offset = SignExtend64<16>(Imm) * 4 + 4 + Addr;
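// e.g. Imm = 0xFFFF is simm16 -1, so Offset = Addr and the branch targets
// itself; Imm = 0 targets the instruction immediately after the 4-byte branch.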
103
104 if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))
105 return MCDisassembler::Success;
106 return addOperand(Inst, MCOperand::createImm(Imm));
107}
108
109static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
110 const MCDisassembler *Decoder) {
111 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
112 int64_t Offset;
113 if (DAsm->isGFX12Plus()) { // GFX12 supports 24-bit signed offsets.
114 Offset = SignExtend64<24>(Imm);
115 } else if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
116 Offset = Imm & 0xFFFFF;
117 } else { // GFX9+ supports 21-bit signed offsets.
118 Offset = SignExtend64<21>(Imm);
119 }
119 }
120 return addOperand(Inst, MCOperand::createImm(Offset));
121}
122
123static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
124 const MCDisassembler *Decoder) {
125 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
126 return addOperand(Inst, DAsm->decodeBoolReg(Val));
127}
128
129static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
130 uint64_t Addr,
131 const MCDisassembler *Decoder) {
132 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
133 return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
134}
135
136static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
137 const MCDisassembler *Decoder) {
138 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
139 return addOperand(Inst, DAsm->decodeDpp8FI(Val));
140}
141
142#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
143 static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm, \
144 uint64_t /*Addr*/, \
145 const MCDisassembler *Decoder) { \
146 auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); \
147 return addOperand(Inst, DAsm->DecoderName(Imm)); \
148 }
149
150// Decoder for registers, decoded directly via RegClassID. Imm (8-bit) is the
151// register number. Used by VGPR-only and AGPR-only operands.
152#define DECODE_OPERAND_REG_8(RegClass) \
153 static DecodeStatus Decode##RegClass##RegisterClass( \
154 MCInst &Inst, unsigned Imm, uint64_t /*Addr*/, \
155 const MCDisassembler *Decoder) { \
156 assert(Imm < (1 << 8) && "8-bit encoding"); \
157 auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); \
158 return addOperand( \
159 Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm)); \
160 }
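// For example, DECODE_OPERAND_REG_8(VReg_128) defines a
// DecodeVReg_128RegisterClass() callback that maps the 8-bit register number
// straight onto AMDGPU::VReg_128RegClassID.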
161
162#define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm) \
163 static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/, \
164 const MCDisassembler *Decoder) { \
165 assert(Imm < (1 << EncSize) && #EncSize "-bit encoding"); \
166 auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); \
167 return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm)); \
168 }
169
170static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
171 unsigned OpWidth, unsigned Imm, unsigned EncImm,
172 const MCDisassembler *Decoder) {
173 assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
174 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
175 return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm));
176}
177
178// Decoder for registers. Imm (7-bit) is the register number; decodeSrcOp is
179// used to get the register class. Used by SGPR-only operands.
180#define DECODE_OPERAND_SREG_7(RegClass, OpWidth) \
181 DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm)
182
183#define DECODE_OPERAND_SREG_8(RegClass, OpWidth) \
184 DECODE_SrcOp(Decode##RegClass##RegisterClass, 8, OpWidth, Imm)
185
186// Decoder for registers. Imm (10-bit): Imm{7-0} is the register number,
187// Imm{9} is acc (AGPR or VGPR), and Imm{8} should be 0 (see VOP3Pe_SMFMAC).
188// Set Imm{8} to 1 (IS_VGPR) to decode using 'enum10' from decodeSrcOp.
189// Used by AV_ register classes (AGPR-only or VGPR-only register operands).
190template <unsigned OpWidth>
191static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
192 const MCDisassembler *Decoder) {
193 return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm | AMDGPU::EncValues::IS_VGPR,
194 Decoder);
195}
196
197// Decoder for Src(9-bit encoding) registers only.
198template <unsigned OpWidth>
199static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
200 uint64_t /* Addr */,
201 const MCDisassembler *Decoder) {
202 return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
203}
204
205// Decoder for Src (9-bit encoding) AGPRs, registers only. The register number
206// is encoded in 9 bits; set Imm{9} to 1 (set acc) and decode using 'enum10'
207// from decodeSrcOp.
208template <unsigned OpWidth>
209static DecodeStatus decodeSrcA9(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
210 const MCDisassembler *Decoder) {
211 return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
212}
213
214// Decoder for 'enum10' from decodeSrcOp: Imm{8-0} is the 9-bit Src encoding
215// and Imm{9} is acc. Registers only.
216template <unsigned OpWidth>
217static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
218 uint64_t /* Addr */,
219 const MCDisassembler *Decoder) {
220 return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, Decoder);
221}
222
223// Decoder for RegisterOperands using the 9-bit Src encoding. The operand can
224// be a register from RegClass or an immediate. Registers that don't belong to
225// RegClass are still decoded, and InstPrinter will report a warning. An
226// immediate is decoded into a constant matching the OperandType (important for
227// floating-point types).
228template <unsigned OpWidth>
229static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
230 uint64_t /* Addr */,
231 const MCDisassembler *Decoder) {
232 return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
233}
234
235// Decoder for Src(9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set acc)
236// and decode using 'enum10' from decodeSrcOp.
237template <unsigned OpWidth>
238static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
239 uint64_t /* Addr */,
240 const MCDisassembler *Decoder) {
241 return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
242}
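// The Src decoders above only massage the encoding (selecting the AGPR half of
// the enum10 space where needed); the width-dependent register class selection
// is centralized in AMDGPUDisassembler::decodeSrcOp.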
243
244// Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
245// when RegisterClass is used as an operand. Most often used for destination
246// operands.
247
248DECODE_OPERAND_REG_8(VGPR_32)
249DECODE_OPERAND_REG_8(VGPR_32_Lo128)
250DECODE_OPERAND_REG_8(VReg_64)
251DECODE_OPERAND_REG_8(VReg_96)
252DECODE_OPERAND_REG_8(VReg_128)
253DECODE_OPERAND_REG_8(VReg_192)
254DECODE_OPERAND_REG_8(VReg_256)
255DECODE_OPERAND_REG_8(VReg_288)
256DECODE_OPERAND_REG_8(VReg_320)
257DECODE_OPERAND_REG_8(VReg_352)
258DECODE_OPERAND_REG_8(VReg_384)
259DECODE_OPERAND_REG_8(VReg_512)
260DECODE_OPERAND_REG_8(VReg_1024)
261
262DECODE_OPERAND_SREG_7(SReg_32, 32)
263DECODE_OPERAND_SREG_7(SReg_32_XM0, 32)
264DECODE_OPERAND_SREG_7(SReg_32_XEXEC, 32)
265DECODE_OPERAND_SREG_7(SReg_32_XM0_XEXEC, 32)
266DECODE_OPERAND_SREG_7(SReg_32_XEXEC_HI, 32)
267DECODE_OPERAND_SREG_7(SReg_64_XEXEC, 64)
268DECODE_OPERAND_SREG_7(SReg_64_XEXEC_XNULL, 64)
269DECODE_OPERAND_SREG_7(SReg_96, 96)
270DECODE_OPERAND_SREG_7(SReg_128, 128)
271DECODE_OPERAND_SREG_7(SReg_128_XNULL, 128)
272DECODE_OPERAND_SREG_7(SReg_256, 256)
273DECODE_OPERAND_SREG_7(SReg_256_XNULL, 256)
274DECODE_OPERAND_SREG_7(SReg_512, 512)
275
276DECODE_OPERAND_SREG_8(SReg_64, 64)
277
278DECODE_OPERAND_REG_8(AGPR_32)
279DECODE_OPERAND_REG_8(AReg_64)
280DECODE_OPERAND_REG_8(AReg_128)
281DECODE_OPERAND_REG_8(AReg_256)
282DECODE_OPERAND_REG_8(AReg_512)
283DECODE_OPERAND_REG_8(AReg_1024)
284
285static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
286 uint64_t /*Addr*/,
287 const MCDisassembler *Decoder) {
288 assert(isUInt<10>(Imm) && "10-bit encoding expected");
289 assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");
290
291 bool IsHi = Imm & (1 << 9);
292 unsigned RegIdx = Imm & 0xff;
293 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
294 return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
295}
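// Bit 9 picks the high or low half of a 32-bit VGPR: createVGPR16Operand maps
// (RegIdx, IsHi) to index RegIdx * 2 + IsHi in the VGPR_16 register class.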
296
297static DecodeStatus
298DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
299 const MCDisassembler *Decoder) {
300 assert(isUInt<8>(Imm) && "8-bit encoding expected");
301
302 bool IsHi = Imm & (1 << 7);
303 unsigned RegIdx = Imm & 0x7f;
304 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
305 return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
306}
307
308template <unsigned OpWidth>
309static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
310 uint64_t /*Addr*/,
311 const MCDisassembler *Decoder) {
312 assert(isUInt<9>(Imm) && "9-bit encoding expected");
313
314 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
315 if (Imm & AMDGPU::EncValues::IS_VGPR) {
316 bool IsHi = Imm & (1 << 7);
317 unsigned RegIdx = Imm & 0x7f;
318 return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
319 }
320 return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
321}
322
323template <unsigned OpWidth>
324static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
325 uint64_t /*Addr*/,
326 const MCDisassembler *Decoder) {
327 assert(isUInt<10>(Imm) && "10-bit encoding expected");
328
329 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
330 if (Imm & AMDGPU::EncValues::IS_VGPR) {
331 bool IsHi = Imm & (1 << 9);
332 unsigned RegIdx = Imm & 0xff;
333 return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
334 }
335 return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
336}
337
338static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
339 uint64_t /*Addr*/,
340 const MCDisassembler *Decoder) {
341 assert(isUInt<10>(Imm) && "10-bit encoding expected");
342 assert(Imm & AMDGPU::EncValues::IS_VGPR && "VGPR expected");
343
344 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
345
346 bool IsHi = Imm & (1 << 9);
347 unsigned RegIdx = Imm & 0xff;
348 return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
349}
350
351static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
352 uint64_t Addr,
353 const MCDisassembler *Decoder) {
354 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
355 return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));
356}
357
358static DecodeStatus decodeOperand_KImmFP64(MCInst &Inst, uint64_t Imm,
359 uint64_t Addr,
360 const MCDisassembler *Decoder) {
361 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
362 return addOperand(Inst, DAsm->decodeMandatoryLiteral64Constant(Imm));
363}
364
365static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
366 uint64_t Addr, const void *Decoder) {
367 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
368 return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));
369}
370
371static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
372 const MCRegisterInfo *MRI) {
373 if (OpIdx < 0)
374 return false;
375
376 const MCOperand &Op = Inst.getOperand(OpIdx);
377 if (!Op.isReg())
378 return false;
379
380 MCRegister Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
381 auto Reg = Sub ? Sub : Op.getReg();
382 return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
383}
384
385static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm, unsigned Opw,
386 const MCDisassembler *Decoder) {
387 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
388 if (!DAsm->isGFX90A()) {
389 Imm &= 511;
390 } else {
391 // If atomic has both vdata and vdst their register classes are tied.
392 // The bit is decoded along with the vdst, first operand. We need to
393 // change register class to AGPR if vdst was AGPR.
394 // If a DS instruction has both data0 and data1 their register classes
395 // are also tied.
396 unsigned Opc = Inst.getOpcode();
397 uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
398 AMDGPU::OpName DataName = (TSFlags & SIInstrFlags::DS)
399 ? AMDGPU::OpName::data0
400 : AMDGPU::OpName::vdata;
401 const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
402 int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataName);
403 if ((int)Inst.getNumOperands() == DataIdx) {
404 int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
405 if (IsAGPROperand(Inst, DstIdx, MRI))
406 Imm |= 512;
407 }
408
409 if (TSFlags & SIInstrFlags::DS) {
410 int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
411 if ((int)Inst.getNumOperands() == Data2Idx &&
412 IsAGPROperand(Inst, DataIdx, MRI))
413 Imm |= 512;
414 }
415 }
416 return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
417}
418
419template <unsigned Opw>
420static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
421 uint64_t /* Addr */,
422 const MCDisassembler *Decoder) {
423 return decodeAVLdSt(Inst, Imm, Opw, Decoder);
424}
425
426static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
427 uint64_t Addr,
428 const MCDisassembler *Decoder) {
429 assert(Imm < (1 << 9) && "9-bit encoding");
430 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
431 return addOperand(Inst, DAsm->decodeSrcOp(64, Imm));
432}
433
434#define DECODE_SDWA(DecName) \
435DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)
436
437DECODE_SDWA(Src32)
438DECODE_SDWA(Src16)
439DECODE_SDWA(VopcDst)
440
441static DecodeStatus decodeVersionImm(MCInst &Inst, unsigned Imm,
442 uint64_t /* Addr */,
443 const MCDisassembler *Decoder) {
444 const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
445 return addOperand(Inst, DAsm->decodeVersionImm(Imm));
446}
447
448#include "AMDGPUGenDisassemblerTables.inc"
449
450namespace {
451// Define bitwidths for various types used to instantiate the decoder.
452template <> constexpr uint32_t InsnBitWidth<uint32_t> = 32;
453template <> constexpr uint32_t InsnBitWidth<uint64_t> = 64;
454template <> constexpr uint32_t InsnBitWidth<std::bitset<96>> = 96;
455template <> constexpr uint32_t InsnBitWidth<std::bitset<128>> = 128;
456} // namespace
457
458//===----------------------------------------------------------------------===//
459//
460//===----------------------------------------------------------------------===//
461
462template <typename InsnType>
463DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t *Table, MCInst &MI,
464 InsnType Inst, uint64_t Address,
465 raw_ostream &Comments) const {
466 assert(MI.getOpcode() == 0);
467 assert(MI.getNumOperands() == 0);
468 MCInst TmpInst;
469 HasLiteral = false;
470 const auto SavedBytes = Bytes;
471
472 SmallString<64> LocalComments;
473 raw_svector_ostream LocalCommentStream(LocalComments);
474 CommentStream = &LocalCommentStream;
475
476 DecodeStatus Res =
477 decodeInstruction(Table, TmpInst, Inst, Address, this, STI);
478
479 CommentStream = nullptr;
480
481 if (Res != MCDisassembler::Fail) {
482 MI = TmpInst;
483 Comments << LocalComments;
484 return MCDisassembler::Success;
485 }
486 Bytes = SavedBytes;
487 return MCDisassembler::Fail;
488}
489
490template <typename InsnType>
491DecodeStatus
492AMDGPUDisassembler::tryDecodeInst(const uint8_t *Table1, const uint8_t *Table2,
493 MCInst &MI, InsnType Inst, uint64_t Address,
494 raw_ostream &Comments) const {
495 for (const uint8_t *T : {Table1, Table2}) {
496 if (DecodeStatus Res = tryDecodeInst(T, MI, Inst, Address, Comments))
497 return Res;
498 }
499 return MCDisassembler::Fail;
500}
501
502template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
503 assert(Bytes.size() >= sizeof(T));
504 const auto Res =
505 support::endian::read<T, llvm::endianness::little>(Bytes.data());
506 Bytes = Bytes.slice(sizeof(T));
507 return Res;
508}
509
510static inline std::bitset<96> eat12Bytes(ArrayRef<uint8_t> &Bytes) {
511 using namespace llvm::support::endian;
512 assert(Bytes.size() >= 12);
513 std::bitset<96> Lo(read<uint64_t, endianness::little>(Bytes.data()));
514 Bytes = Bytes.slice(8);
515 std::bitset<96> Hi(read<uint32_t, endianness::little>(Bytes.data()));
516 Bytes = Bytes.slice(4);
517 return (Hi << 64) | Lo;
518}
519
520static inline std::bitset<128> eat16Bytes(ArrayRef<uint8_t> &Bytes) {
521 using namespace llvm::support::endian;
522 assert(Bytes.size() >= 16);
523 std::bitset<128> Lo(read<uint64_t, endianness::little>(Bytes.data()));
524 Bytes = Bytes.slice(8);
525 std::bitset<128> Hi(read<uint64_t, endianness::little>(Bytes.data()));
526 Bytes = Bytes.slice(8);
527 return (Hi << 64) | Lo;
528}
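// Both helpers splice the little-endian words so that instruction bit 0 is
// bit 0 of the first byte in memory; e.g. bytes 00 01 .. 0f decode with
// Lo = 0x0706050403020100 and the remaining bytes in the upper half.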
529
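// Rewrite raw source-operand encodings in place: inline integers become their
// signed values, LITERAL_CONST pulls in the trailing literal dword, and inline
// float encodings are replaced by the bit pattern of the operand's format.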
530void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
531 const MCInstrInfo &MCII) const {
532 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
533 for (auto [OpNo, OpDesc] : enumerate(Desc.operands())) {
534 if (OpNo >= MI.getNumOperands())
535 continue;
536
537 // TODO: Fix V_DUAL_FMAMK_F32_X_FMAAK_F32_gfx12 vsrc operands,
538 // defined to take VGPR_32, but in reality allowing inline constants.
539 bool IsSrc = AMDGPU::OPERAND_SRC_FIRST <= OpDesc.OperandType &&
540 OpDesc.OperandType <= AMDGPU::OPERAND_SRC_LAST;
541 if (!IsSrc && OpDesc.OperandType != MCOI::OPERAND_REGISTER)
542 continue;
543
544 MCOperand &Op = MI.getOperand(OpNo);
545 if (!Op.isImm())
546 continue;
547 int64_t Imm = Op.getImm();
548 if (AMDGPU::EncValues::INLINE_INTEGER_C_MIN <= Imm &&
549 Imm <= AMDGPU::EncValues::INLINE_INTEGER_C_MAX) {
550 Op = decodeIntImmed(Imm);
551 continue;
552 }
553
554 if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
555 Op = decodeLiteralConstant(OpDesc.OperandType ==
556 AMDGPU::OPERAND_REG_IMM_FP64);
557 continue;
558 }
559
560 if (AMDGPU::EncValues::INLINE_FLOATING_C_MIN <= Imm &&
561 Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX) {
562 switch (OpDesc.OperandType) {
563 case AMDGPU::OPERAND_REG_IMM_BF16:
564 case AMDGPU::OPERAND_REG_IMM_V2BF16:
565 case AMDGPU::OPERAND_REG_INLINE_C_BF16:
566 case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
567 Imm = getInlineImmValBF16(Imm);
568 break;
569 case AMDGPU::OPERAND_REG_IMM_FP16:
570 case AMDGPU::OPERAND_REG_IMM_INT16:
571 case AMDGPU::OPERAND_REG_IMM_V2FP16:
572 case AMDGPU::OPERAND_REG_IMM_V2INT16:
573 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
574 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
575 Imm = getInlineImmValF16(Imm);
576 break;
577 case AMDGPU::OPERAND_REG_IMM_FP64:
578 case AMDGPU::OPERAND_REG_IMM_INT64:
579 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
580 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
581 case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
582 Imm = getInlineImmVal64(Imm);
583 break;
584 default:
585 Imm = getInlineImmVal32(Imm);
586 }
587 Op.setImm(Imm);
588 }
589 }
590}
591
592DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
593 ArrayRef<uint8_t> Bytes_,
594 uint64_t Address,
595 raw_ostream &CS) const {
596 unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
597 Bytes = Bytes_.slice(0, MaxInstBytesNum);
598
599 // In case the opcode is not recognized we'll assume a Size of 4 bytes (unless
600 // there are fewer bytes left). This will be overridden on success.
601 Size = std::min((size_t)4, Bytes_.size());
602
603 do {
604 // ToDo: better to switch encoding length using some bit predicate
605 // but it is unknown yet, so try all we can
606
607 // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
608 // encodings
609 if (isGFX1250() && Bytes.size() >= 16) {
610 std::bitset<128> DecW = eat16Bytes(Bytes);
611 if (tryDecodeInst(DecoderTableGFX1250128, MI, DecW, Address, CS))
612 break;
613 Bytes = Bytes_.slice(0, MaxInstBytesNum);
614 }
615
616 if (isGFX11Plus() && Bytes.size() >= 12) {
617 std::bitset<96> DecW = eat12Bytes(Bytes);
618
619 if (isGFX11() &&
620 tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI,
621 DecW, Address, CS))
622 break;
623
624 if (isGFX1250() &&
625 tryDecodeInst(DecoderTableGFX125096, DecoderTableGFX1250_FAKE1696, MI,
626 DecW, Address, CS))
627 break;
628
629 if (isGFX12() &&
630 tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI,
631 DecW, Address, CS))
632 break;
633
634 if (isGFX12() &&
635 tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS))
636 break;
637
638 if (STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
639 // Return 8 bytes for a potential literal.
640 Bytes = Bytes_.slice(4, MaxInstBytesNum - 4);
641
642 if (isGFX1250() &&
643 tryDecodeInst(DecoderTableGFX125096, MI, DecW, Address, CS))
644 break;
645 }
646
647 // Reinitialize Bytes
648 Bytes = Bytes_.slice(0, MaxInstBytesNum);
649
650 } else if (Bytes.size() >= 16 &&
651 STI.hasFeature(AMDGPU::FeatureGFX950Insts)) {
652 std::bitset<128> DecW = eat16Bytes(Bytes);
653 if (tryDecodeInst(DecoderTableGFX940128, MI, DecW, Address, CS))
654 break;
655
656 // Reinitialize Bytes
657 Bytes = Bytes_.slice(0, MaxInstBytesNum);
658 }
659
660 if (Bytes.size() >= 8) {
661 const uint64_t QW = eatBytes<uint64_t>(Bytes);
662
663 if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
664 tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS))
665 break;
666
667 if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
668 tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS))
669 break;
670
671 if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
672 tryDecodeInst(DecoderTableGFX95064, MI, QW, Address, CS))
673 break;
674
675 // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
676 // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
677 // table first so we print the correct name.
678 if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
679 tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS))
680 break;
681
682 if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
683 tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS))
684 break;
685
686 if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
687 tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS))
688 break;
689
690 if ((isVI() || isGFX9()) &&
691 tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS))
692 break;
693
694 if (isGFX9() && tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS))
695 break;
696
697 if (isGFX10() && tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS))
698 break;
699
700 if (isGFX1250() &&
701 tryDecodeInst(DecoderTableGFX125064, DecoderTableGFX1250_FAKE1664, MI,
702 QW, Address, CS))
703 break;
704
705 if (isGFX12() &&
706 tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
707 Address, CS))
708 break;
709
710 if (isGFX11() &&
711 tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
712 Address, CS))
713 break;
714
715 if (isGFX11() &&
716 tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS))
717 break;
718
719 if (isGFX12() &&
720 tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS))
721 break;
722
723 // Reinitialize Bytes
724 Bytes = Bytes_.slice(0, MaxInstBytesNum);
725 }
726
727 // Try decode 32-bit instruction
728 if (Bytes.size() >= 4) {
729 const uint32_t DW = eatBytes<uint32_t>(Bytes);
730
731 if ((isVI() || isGFX9()) &&
732 tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS))
733 break;
734
735 if (tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS))
736 break;
737
738 if (isGFX9() && tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS))
739 break;
740
741 if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
742 tryDecodeInst(DecoderTableGFX95032, MI, DW, Address, CS))
743 break;
744
745 if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
746 tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS))
747 break;
748
749 if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
750 tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS))
751 break;
752
753 if (isGFX10() && tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS))
754 break;
755
756 if (isGFX11() &&
757 tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
758 Address, CS))
759 break;
760
761 if (isGFX1250() &&
762 tryDecodeInst(DecoderTableGFX125032, DecoderTableGFX1250_FAKE1632, MI,
763 DW, Address, CS))
764 break;
765
766 if (isGFX12() &&
767 tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
768 Address, CS))
769 break;
770 }
771
772 return MCDisassembler::Fail;
773 } while (false);
774
775 DecodeStatus Status = MCDisassembler::Success;
776
777 decodeImmOperands(MI, *MCII);
778
779 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
780 if (isMacDPP(MI))
781 convertMacDPPInst(MI);
782
783 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P)
784 convertVOP3PDPPInst(MI);
785 else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
786 convertVOPCDPPInst(MI); // Special VOP3 case
787 else if (AMDGPU::isVOPC64DPP(MI.getOpcode()))
788 convertVOPC64DPPInst(MI); // Special VOP3 case
789 else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=
790 -1)
791 convertDPP8Inst(MI);
792 else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3)
793 convertVOP3DPPInst(MI); // Regular VOP3 case
794 }
795
797
798 if (AMDGPU::isMAC(MI.getOpcode())) {
799 // Insert dummy unused src2_modifiers.
800 insertNamedMCOperand(MI, MCOperand::createImm(0),
801 AMDGPU::OpName::src2_modifiers);
802 }
803
804 if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
805 MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
806 // Insert dummy unused src2_modifiers.
807 insertNamedMCOperand(MI, MCOperand::createImm(0),
808 AMDGPU::OpName::src2_modifiers);
809 }
810
811 if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
812 !AMDGPU::hasGDS(STI)) {
813 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::gds);
814 }
815
816 if (MCII->get(MI.getOpcode()).TSFlags &
817 (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD)) {
818 int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
819 AMDGPU::OpName::cpol);
820 if (CPolPos != -1) {
821 unsigned CPol =
822 (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
823 AMDGPU::CPol::GLC : 0;
824 if (MI.getNumOperands() <= (unsigned)CPolPos) {
825 insertNamedMCOperand(MI, MCOperand::createImm(CPol),
826 AMDGPU::OpName::cpol);
827 } else if (CPol) {
828 MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
829 }
830 }
831 }
832
833 if ((MCII->get(MI.getOpcode()).TSFlags &
834 (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
835 (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
836 // GFX90A lost TFE, its place is occupied by ACC.
837 int TFEOpIdx =
838 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
839 if (TFEOpIdx != -1) {
840 auto *TFEIter = MI.begin();
841 std::advance(TFEIter, TFEOpIdx);
842 MI.insert(TFEIter, MCOperand::createImm(0));
843 }
844 }
845
846 if (MCII->get(MI.getOpcode()).TSFlags &
847 (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) {
848 int SWZOpIdx =
849 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
850 if (SWZOpIdx != -1) {
851 auto *SWZIter = MI.begin();
852 std::advance(SWZIter, SWZOpIdx);
853 MI.insert(SWZIter, MCOperand::createImm(0));
854 }
855 }
856
857 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
858 int VAddr0Idx =
859 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
860 int RsrcIdx =
861 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
862 unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
863 if (VAddr0Idx >= 0 && NSAArgs > 0) {
864 unsigned NSAWords = (NSAArgs + 3) / 4;
865 if (Bytes.size() < 4 * NSAWords)
866 return MCDisassembler::Fail;
867 for (unsigned i = 0; i < NSAArgs; ++i) {
868 const unsigned VAddrIdx = VAddr0Idx + 1 + i;
869 auto VAddrRCID =
870 MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
871 MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
872 }
873 Bytes = Bytes.slice(4 * NSAWords);
874 }
875
876 convertMIMGInst(MI);
877 }
878
879 if (MCII->get(MI.getOpcode()).TSFlags &
880 (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE))
881 convertMIMGInst(MI);
882
883 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP)
884 convertEXPInst(MI);
885
886 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP)
887 convertVINTERPInst(MI);
888
889 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA)
890 convertSDWAInst(MI);
891
892 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsMAI)
893 convertMAIInst(MI);
894
895 if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsWMMA)
896 convertWMMAInst(MI);
897
898 int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
899 AMDGPU::OpName::vdst_in);
900 if (VDstIn_Idx != -1) {
901 int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
902 MCOI::TIED_TO);
903 if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
904 !MI.getOperand(VDstIn_Idx).isReg() ||
905 MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
906 if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
907 MI.erase(&MI.getOperand(VDstIn_Idx));
908 insertNamedMCOperand(MI,
909 MCOperand::createReg(MI.getOperand(Tied).getReg()),
910 AMDGPU::OpName::vdst_in);
911 }
912 }
913
914 bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
915 if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm) && !IsSOPK)
916 convertFMAanyK(MI);
917
918 // Some VOPC instructions, e.g., v_cmpx_f_f64, use VOP3 encoding and
919 // have EXEC as implicit destination. Issue a warning if encoding for
920 // vdst is not EXEC.
921 if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3) &&
922 MCII->get(MI.getOpcode()).hasImplicitDefOfPhysReg(AMDGPU::EXEC)) {
923 auto ExecEncoding = MRI.getEncodingValue(AMDGPU::EXEC_LO);
924 if (Bytes_[0] != ExecEncoding)
925 Status = MCDisassembler::SoftFail;
926 }
927
928 Size = MaxInstBytesNum - Bytes.size();
929 return Status;
930}
931
932void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
933 if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
934 // The MCInst still has these fields even though they are no longer encoded
935 // in the GFX11 instruction.
936 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
937 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
938 }
939}
940
941void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
943 if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
944 MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
945 MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
946 MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
947 MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
948 MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
949 MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
950 MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
951 MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
952 MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
953 MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
954 MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
955 MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
956 MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
957 MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
958 MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12) {
959 // The MCInst has this field that is not directly encoded in the
960 // instruction.
961 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
962 }
963}
964
965void AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
966 if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
967 STI.hasFeature(AMDGPU::FeatureGFX10)) {
968 if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
969 // VOPC - insert clamp
970 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
971 } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
972 int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
973 if (SDst != -1) {
974 // VOPC - insert VCC register as sdst
975 insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
976 AMDGPU::OpName::sdst);
977 } else {
978 // VOP1/2 - insert omod if present in instruction
979 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
980 }
981 }
982}
983
984/// Adjust the register values used by V_MFMA_F8F6F4_f8_f8 instructions to the
985/// appropriate subregister for the used format width.
986static void adjustMFMA_F8F6F4OpRegClass(const MCRegisterInfo &MRI,
987 MCOperand &MO, uint8_t NumRegs) {
988 switch (NumRegs) {
989 case 4:
990 return MO.setReg(MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3));
991 case 6:
992 return MO.setReg(
993 MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5));
994 case 8:
995 if (MCRegister NewReg = MRI.getSubReg(
996 MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7)) {
997 MO.setReg(NewReg);
998 }
999 return;
1000 case 12: {
1001 // There is no 384-bit subreg index defined.
1002 MCRegister BaseReg = MRI.getSubReg(MO.getReg(), AMDGPU::sub0);
1003 MCRegister NewReg = MRI.getMatchingSuperReg(
1004 BaseReg, AMDGPU::sub0, &MRI.getRegClass(AMDGPU::VReg_384RegClassID));
1005 return MO.setReg(NewReg);
1006 }
1007 case 16:
1008 // No-op in cases where one operand is still f8/bf8.
1009 return;
1010 default:
1011 llvm_unreachable("Unexpected size for mfma/wmma f8f6f4 operand");
1012 }
1013}
1014
1015/// f8f6f4 instructions have different pseudos depending on the used formats. In
1016/// the disassembler table, we only have the variants with the largest register
1017/// classes which assume using an fp8/bf8 format for both operands. The actual
1018/// register class depends on the format in blgp and cbsz operands. Adjust the
1019/// register classes depending on the used format.
1020void AMDGPUDisassembler::convertMAIInst(MCInst &MI) const {
1021 int BlgpIdx =
1022 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::blgp);
1023 if (BlgpIdx == -1)
1024 return;
1025
1026 int CbszIdx =
1027 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::cbsz);
1028
1029 unsigned CBSZ = MI.getOperand(CbszIdx).getImm();
1030 unsigned BLGP = MI.getOperand(BlgpIdx).getImm();
1031
1032 const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
1033 AMDGPU::getMFMA_F8F6F4_WithFormatArgs(CBSZ, BLGP, MI.getOpcode());
1034 if (!AdjustedRegClassOpcode ||
1035 AdjustedRegClassOpcode->Opcode == MI.getOpcode())
1036 return;
1037
1038 MI.setOpcode(AdjustedRegClassOpcode->Opcode);
1039 int Src0Idx =
1040 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
1041 int Src1Idx =
1042 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
1043 adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
1044 AdjustedRegClassOpcode->NumRegsSrcA);
1045 adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
1046 AdjustedRegClassOpcode->NumRegsSrcB);
1047}
1048
1049void AMDGPUDisassembler::convertWMMAInst(MCInst &MI) const {
1050 int FmtAIdx =
1051 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::matrix_a_fmt);
1052 if (FmtAIdx == -1)
1053 return;
1054
1055 int FmtBIdx =
1056 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::matrix_b_fmt);
1057
1058 unsigned FmtA = MI.getOperand(FmtAIdx).getImm();
1059 unsigned FmtB = MI.getOperand(FmtBIdx).getImm();
1060
1061 const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
1062 AMDGPU::getWMMA_F8F6F4_WithFormatArgs(FmtA, FmtB, MI.getOpcode());
1063 if (!AdjustedRegClassOpcode ||
1064 AdjustedRegClassOpcode->Opcode == MI.getOpcode())
1065 return;
1066
1067 MI.setOpcode(AdjustedRegClassOpcode->Opcode);
1068 int Src0Idx =
1069 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
1070 int Src1Idx =
1071 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
1072 adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
1073 AdjustedRegClassOpcode->NumRegsSrcA);
1074 adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
1075 AdjustedRegClassOpcode->NumRegsSrcB);
1076}
1077
1078struct VOPModifiers {
1079 unsigned OpSel = 0;
1080 unsigned OpSelHi = 0;
1081 unsigned NegLo = 0;
1082 unsigned NegHi = 0;
1083};
1084
1085// Reconstruct values of VOP3/VOP3P operands such as op_sel.
1086// Note that these values do not affect disassembler output,
1087// so this is only necessary for consistency with src_modifiers.
1088static VOPModifiers collectVOPModifiers(const MCInst &MI,
1089 bool IsVOP3P = false) {
1090 VOPModifiers Modifiers;
1091 unsigned Opc = MI.getOpcode();
1092 const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers,
1093 AMDGPU::OpName::src1_modifiers,
1094 AMDGPU::OpName::src2_modifiers};
1095 for (int J = 0; J < 3; ++J) {
1096 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
1097 if (OpIdx == -1)
1098 continue;
1099
1100 unsigned Val = MI.getOperand(OpIdx).getImm();
1101
1102 Modifiers.OpSel |= !!(Val & SISrcMods::OP_SEL_0) << J;
1103 if (IsVOP3P) {
1104 Modifiers.OpSelHi |= !!(Val & SISrcMods::OP_SEL_1) << J;
1105 Modifiers.NegLo |= !!(Val & SISrcMods::NEG) << J;
1106 Modifiers.NegHi |= !!(Val & SISrcMods::NEG_HI) << J;
1107 } else if (J == 0) {
1108 Modifiers.OpSel |= !!(Val & SISrcMods::DST_OP_SEL) << 3;
1109 }
1110 }
1111
1112 return Modifiers;
1113}
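// For example, an op_sel bit on src1 arrives as OP_SEL_0 inside
// src1_modifiers and is repacked here into bit 1 of Modifiers.OpSel.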
1114
1115// Instructions decode the op_sel/suffix bits into the src_modifier
1116// operands. Copy those bits into the src operands for true16 VGPRs.
1117void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const {
1118 const unsigned Opc = MI.getOpcode();
1119 const MCRegisterClass &ConversionRC =
1120 MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
1121 constexpr std::array<std::tuple<AMDGPU::OpName, AMDGPU::OpName, unsigned>, 4>
1122 OpAndOpMods = {{{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
1123 SISrcMods::OP_SEL_0},
1124 {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
1125 SISrcMods::OP_SEL_0},
1126 {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
1127 SISrcMods::OP_SEL_0},
1128 {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
1129 SISrcMods::DST_OP_SEL}}};
1130 for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
1131 int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName);
1132 int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
1133 if (OpIdx == -1 || OpModsIdx == -1)
1134 continue;
1135 MCOperand &Op = MI.getOperand(OpIdx);
1136 if (!Op.isReg())
1137 continue;
1138 if (!ConversionRC.contains(Op.getReg()))
1139 continue;
1140 unsigned OpEnc = MRI.getEncodingValue(Op.getReg());
1141 const MCOperand &OpMods = MI.getOperand(OpModsIdx);
1142 unsigned ModVal = OpMods.getImm();
1143 if (ModVal & OpSelMask) { // isHi
1144 unsigned RegIdx = OpEnc & AMDGPU::HWEncoding::REG_IDX_MASK;
1145 Op.setReg(ConversionRC.getRegister(RegIdx * 2 + 1));
1146 }
1147 }
1148}
1149
1150// MAC opcodes have special old and src2 operands.
1151// src2 is tied to dst, while old is not tied (but assumed to be).
1152bool AMDGPUDisassembler::isMacDPP(MCInst &MI) const {
1153 constexpr int DST_IDX = 0;
1154 auto Opcode = MI.getOpcode();
1155 const auto &Desc = MCII->get(Opcode);
1156 auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);
1157
1158 if (OldIdx != -1 && Desc.getOperandConstraint(
1159 OldIdx, MCOI::OperandConstraint::TIED_TO) == -1) {
1160 assert(AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2));
1161 assert(Desc.getOperandConstraint(
1162 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),
1163 MCOI::TIED_TO) == DST_IDX);
1164 (void)DST_IDX;
1165 return true;
1166 }
1167
1168 return false;
1169}
1170
1171// Create dummy old operand and insert dummy unused src2_modifiers
1172void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
1173 assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
1174 insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1175 insertNamedMCOperand(MI, MCOperand::createImm(0),
1176 AMDGPU::OpName::src2_modifiers);
1177}
1178
1180 unsigned Opc = MI.getOpcode();
1181
1182 int VDstInIdx =
1183 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1184 if (VDstInIdx != -1)
1185 insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1186
1187 unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1188 if (MI.getNumOperands() < DescNumOps &&
1189 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1191 auto Mods = collectVOPModifiers(MI);
1192 insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1193 AMDGPU::OpName::op_sel);
1194 } else {
1195 // Insert dummy unused src modifiers.
1196 if (MI.getNumOperands() < DescNumOps &&
1197 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1198 insertNamedMCOperand(MI, MCOperand::createImm(0),
1199 AMDGPU::OpName::src0_modifiers);
1200
1201 if (MI.getNumOperands() < DescNumOps &&
1202 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1203 insertNamedMCOperand(MI, MCOperand::createImm(0),
1204 AMDGPU::OpName::src1_modifiers);
1205 }
1206}
1207
1208void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
1209 convertTrue16OpSel(MI);
1210
1211 int VDstInIdx =
1212 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1213 if (VDstInIdx != -1)
1214 insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1215
1216 unsigned Opc = MI.getOpcode();
1217 unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1218 if (MI.getNumOperands() < DescNumOps &&
1219 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1220 auto Mods = collectVOPModifiers(MI);
1221 insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1222 AMDGPU::OpName::op_sel);
1223 }
1224}
1225
1226// Note that before gfx10, the MIMG encoding provided no information about
1227// VADDR size. Consequently, decoded instructions always show the address as
1228// if it had 1 dword, which may not actually be the case.
1229void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
1230 auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;
1231
1232 int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1233 AMDGPU::OpName::vdst);
1234
1235 int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1236 AMDGPU::OpName::vdata);
1237 int VAddr0Idx =
1238 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
1239 AMDGPU::OpName RsrcOpName = (TSFlags & SIInstrFlags::MIMG)
1240 ? AMDGPU::OpName::srsrc
1241 : AMDGPU::OpName::rsrc;
1242 int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
1243 int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1244 AMDGPU::OpName::dmask);
1245
1246 int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1247 AMDGPU::OpName::tfe);
1248 int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1249 AMDGPU::OpName::d16);
1250
1251 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
1252 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1253 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
1254
1255 assert(VDataIdx != -1);
1256 if (BaseOpcode->BVH) {
1257 // Add A16 operand for intersect_ray instructions
1258 addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
1259 return;
1260 }
1261
1262 bool IsAtomic = (VDstIdx != -1);
1263 bool IsGather4 = TSFlags & SIInstrFlags::Gather4;
1264 bool IsVSample = TSFlags & SIInstrFlags::VSAMPLE;
1265 bool IsNSA = false;
1266 bool IsPartialNSA = false;
1267 unsigned AddrSize = Info->VAddrDwords;
1268
1269 if (isGFX10Plus()) {
1270 unsigned DimIdx =
1271 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
1272 int A16Idx =
1273 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
1274 const AMDGPU::MIMGDimInfo *Dim =
1275 AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
1276 const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());
1277
1278 AddrSize =
1279 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));
1280
1281 // VSAMPLE insts that do not use vaddr3 behave the same as NSA forms.
1282 // VIMAGE insts other than BVH never use vaddr4.
1283 IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
1284 Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
1285 Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
1286 if (!IsNSA) {
1287 if (!IsVSample && AddrSize > 12)
1288 AddrSize = 16;
1289 } else {
1290 if (AddrSize > Info->VAddrDwords) {
1291 if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
1292 // The NSA encoding does not contain enough operands for the
1293 // combination of base opcode / dimension. Should this be an error?
1294 return;
1295 }
1296 IsPartialNSA = true;
1297 }
1298 }
1299 }
1300
1301 unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
1302 unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);
1303
1304 bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
1305 if (D16 && AMDGPU::hasPackedD16(STI)) {
1306 DstSize = (DstSize + 1) / 2;
1307 }
1308
1309 if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
1310 DstSize += 1;
1311
1312 if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
1313 return;
1314
1315 int NewOpcode =
1316 AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
1317 if (NewOpcode == -1)
1318 return;
1319
1320 // Widen the register to the correct number of enabled channels.
1321 MCRegister NewVdata;
1322 if (DstSize != Info->VDataDwords) {
1323 auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
1324
1325 // Get first subregister of VData
1326 MCRegister Vdata0 = MI.getOperand(VDataIdx).getReg();
1327 MCRegister VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
1328 Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;
1329
1330 NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
1331 &MRI.getRegClass(DataRCID));
1332 if (!NewVdata) {
1333 // It's possible to encode this such that the low register + enabled
1334 // components exceeds the register count.
1335 return;
1336 }
1337 }
1338
1339 // If not using NSA on GFX10+, widen vaddr0 address register to correct size.
1340 // If using partial NSA on GFX11+ widen last address register.
1341 int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;
1342 MCRegister NewVAddrSA;
1343 if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
1344 AddrSize != Info->VAddrDwords) {
1345 MCRegister VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
1346 MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
1347 VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;
1348
1349 auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
1350 NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
1351 &MRI.getRegClass(AddrRCID));
1352 if (!NewVAddrSA)
1353 return;
1354 }
1355
1356 MI.setOpcode(NewOpcode);
1357
1358 if (NewVdata != AMDGPU::NoRegister) {
1359 MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);
1360
1361 if (IsAtomic) {
1362 // Atomic operations have an additional operand (a copy of data)
1363 MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
1364 }
1365 }
1366
1367 if (NewVAddrSA) {
1368 MI.getOperand(VAddrSAIdx) = MCOperand::createReg(NewVAddrSA);
1369 } else if (IsNSA) {
1370 assert(AddrSize <= Info->VAddrDwords);
1371 MI.erase(MI.begin() + VAddr0Idx + AddrSize,
1372 MI.begin() + VAddr0Idx + Info->VAddrDwords);
1373 }
1374}
1375
1376// Op_sel and neg bits are used in src_modifiers and in standalone operands.
1377// The autogenerated decoder only adds them to src_modifiers, so manually add
1378// the bits to the other operands.
1379void AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
1380 unsigned Opc = MI.getOpcode();
1381 unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1382 auto Mods = collectVOPModifiers(MI, true);
1383
1384 if (MI.getNumOperands() < DescNumOps &&
1385 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
1386 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vdst_in);
1387
1388 if (MI.getNumOperands() < DescNumOps &&
1389 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel))
1390 insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1391 AMDGPU::OpName::op_sel);
1392 if (MI.getNumOperands() < DescNumOps &&
1393 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel_hi))
1394 insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSelHi),
1395 AMDGPU::OpName::op_sel_hi);
1396 if (MI.getNumOperands() < DescNumOps &&
1397 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_lo))
1398 insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegLo),
1399 AMDGPU::OpName::neg_lo);
1400 if (MI.getNumOperands() < DescNumOps &&
1401 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
1402 insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
1403 AMDGPU::OpName::neg_hi);
1404}
1405
1406// Create dummy old operand and insert optional operands
1407void AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
1408 unsigned Opc = MI.getOpcode();
1409 unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1410
1411 if (MI.getNumOperands() < DescNumOps &&
1412 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::old))
1413 insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1414
1415 if (MI.getNumOperands() < DescNumOps &&
1416 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1417 insertNamedMCOperand(MI, MCOperand::createImm(0),
1418 AMDGPU::OpName::src0_modifiers);
1419
1420 if (MI.getNumOperands() < DescNumOps &&
1421 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1422 insertNamedMCOperand(MI, MCOperand::createImm(0),
1423 AMDGPU::OpName::src1_modifiers);
1424}
1425
1426void AMDGPUDisassembler::convertVOPC64DPPInst(MCInst &MI) const {
1427 unsigned Opc = MI.getOpcode();
1428 unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1429
1431
1432 if (MI.getNumOperands() < DescNumOps &&
1433 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1434 auto Mods = collectVOPModifiers(MI);
1435 insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1436 AMDGPU::OpName::op_sel);
1437 }
1438}
1439
1440void AMDGPUDisassembler::convertFMAanyK(MCInst &MI) const {
1441 assert(HasLiteral && "Should have decoded a literal");
1442 insertNamedMCOperand(MI, MCOperand::createImm(Literal), AMDGPU::OpName::immX);
1443}
1444
1445const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
1446 return getContext().getRegisterInfo()->
1447 getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
1448}
1449
1450inline
1451MCOperand AMDGPUDisassembler::errOperand(unsigned V,
1452 const Twine& ErrMsg) const {
1453 *CommentStream << "Error: " + ErrMsg;
1454
1455 // ToDo: add support for error operands to MCInst.h
1456 // return MCOperand::createError(V);
1457 return MCOperand();
1458}
1459
1460inline
1461MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
1462 return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
1463}
1464
1465inline
1466MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
1467 unsigned Val) const {
1468 const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
1469 if (Val >= RegCl.getNumRegs())
1470 return errOperand(Val, Twine(getRegClassName(RegClassID)) +
1471 ": unknown register " + Twine(Val));
1472 return createRegOperand(RegCl.getRegister(Val));
1473}
1474
1475inline
1476MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
1477 unsigned Val) const {
1478 // ToDo: SI/CI have 104 SGPRs, VI has 102.
1479 // Valery: here we accept as much as we can and let the assembler sort it out.
1480 int shift = 0;
1481 switch (SRegClassID) {
1482 case AMDGPU::SGPR_32RegClassID:
1483 case AMDGPU::TTMP_32RegClassID:
1484 break;
1485 case AMDGPU::SGPR_64RegClassID:
1486 case AMDGPU::TTMP_64RegClassID:
1487 shift = 1;
1488 break;
1489 case AMDGPU::SGPR_96RegClassID:
1490 case AMDGPU::TTMP_96RegClassID:
1491 case AMDGPU::SGPR_128RegClassID:
1492 case AMDGPU::TTMP_128RegClassID:
1493 // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
1494 // this bundle?
1495 case AMDGPU::SGPR_256RegClassID:
1496 case AMDGPU::TTMP_256RegClassID:
1497 // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
1498 // this bundle?
1499 case AMDGPU::SGPR_288RegClassID:
1500 case AMDGPU::TTMP_288RegClassID:
1501 case AMDGPU::SGPR_320RegClassID:
1502 case AMDGPU::TTMP_320RegClassID:
1503 case AMDGPU::SGPR_352RegClassID:
1504 case AMDGPU::TTMP_352RegClassID:
1505 case AMDGPU::SGPR_384RegClassID:
1506 case AMDGPU::TTMP_384RegClassID:
1507 case AMDGPU::SGPR_512RegClassID:
1508 case AMDGPU::TTMP_512RegClassID:
1509 shift = 2;
1510 break;
1511 // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
1512 // this bundle?
1513 default:
1514 llvm_unreachable("unhandled register class");
1515 }
1516
1517 if (Val % (1 << shift)) {
1518 *CommentStream << "Warning: " << getRegClassName(SRegClassID)
1519 << ": scalar reg isn't aligned " << Val;
1520 }
1521
1522 return createRegOperand(SRegClassID, Val >> shift);
1523}
1524
1525MCOperand AMDGPUDisassembler::createVGPR16Operand(unsigned RegIdx,
1526 bool IsHi) const {
1527 unsigned RegIdxInVGPR16 = RegIdx * 2 + (IsHi ? 1 : 0);
1528 return createRegOperand(AMDGPU::VGPR_16RegClassID, RegIdxInVGPR16);
1529}
1530
1531// Decode Literals for insts which always have a literal in the encoding
1532MCOperand
1533AMDGPUDisassembler::decodeMandatoryLiteralConstant(unsigned Val) const {
1534 if (HasLiteral) {
1535 assert(
1536 AMDGPU::hasVOPD(STI) &&
1537 "Should only decode multiple kimm with VOPD, check VSrc operand types");
1538 if (Literal != Val)
1539 return errOperand(Val, "More than one unique literal is illegal");
1540 }
1541 HasLiteral = true;
1542 Literal = Val;
1543 return MCOperand::createImm(Literal);
1544}
1545
1546MCOperand
1547AMDGPUDisassembler::decodeMandatoryLiteral64Constant(uint64_t Val) const {
1548 if (HasLiteral) {
1549 if (Literal64 != Val)
1550 return errOperand(Val, "More than one unique literal is illegal");
1551 }
1552 HasLiteral = true;
1553 Literal = Literal64 = Val;
1554 return MCOperand::createImm(Literal64);
1555}
1556
1557MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
1558 // For now all literal constants are supposed to be unsigned integer
1559 // ToDo: deal with signed/unsigned 64-bit integer constants
1560 // ToDo: deal with float/double constants
1561 if (!HasLiteral) {
1562 if (Bytes.size() < 4) {
1563 return errOperand(0, "cannot read literal, inst bytes left " +
1564 Twine(Bytes.size()));
1565 }
1566 HasLiteral = true;
1567 Literal = Literal64 = eatBytes<uint32_t>(Bytes);
1568 if (ExtendFP64)
1569 Literal64 <<= 32;
1570 }
1571 return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
1572}
1573
1574MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const {
1575 assert(STI.hasFeature(AMDGPU::Feature64BitLiterals));
1576
1577 if (!HasLiteral) {
1578 if (Bytes.size() < 8) {
1579 return errOperand(0, "cannot read literal64, inst bytes left " +
1580 Twine(Bytes.size()));
1581 }
1582 HasLiteral = true;
1583 Literal64 = eatBytes<uint64_t>(Bytes);
1584 }
1585 return MCOperand::createImm(Literal64);
1586}
1587
1588MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
1589 using namespace AMDGPU::EncValues;
1590
1591 assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
1592 return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
1593 (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
1594 (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
1595 // Cast prevents negative overflow.
1596}
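// e.g. Imm = 129 decodes to +1 and Imm = 193 to -1: encodings [128, 192] are
// the non-negative values 0..64, and [193, 208] count down from -1 to -16.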
1597
1598static int64_t getInlineImmVal32(unsigned Imm) {
1599 switch (Imm) {
1600 case 240:
1601 return llvm::bit_cast<uint32_t>(0.5f);
1602 case 241:
1603 return llvm::bit_cast<uint32_t>(-0.5f);
1604 case 242:
1605 return llvm::bit_cast<uint32_t>(1.0f);
1606 case 243:
1607 return llvm::bit_cast<uint32_t>(-1.0f);
1608 case 244:
1609 return llvm::bit_cast<uint32_t>(2.0f);
1610 case 245:
1611 return llvm::bit_cast<uint32_t>(-2.0f);
1612 case 246:
1613 return llvm::bit_cast<uint32_t>(4.0f);
1614 case 247:
1615 return llvm::bit_cast<uint32_t>(-4.0f);
1616 case 248: // 1 / (2 * PI)
1617 return 0x3e22f983;
1618 default:
1619 llvm_unreachable("invalid fp inline imm");
1620 }
1621}
1622
1623static int64_t getInlineImmVal64(unsigned Imm) {
1624 switch (Imm) {
1625 case 240:
1626 return llvm::bit_cast<uint64_t>(0.5);
1627 case 241:
1628 return llvm::bit_cast<uint64_t>(-0.5);
1629 case 242:
1630 return llvm::bit_cast<uint64_t>(1.0);
1631 case 243:
1632 return llvm::bit_cast<uint64_t>(-1.0);
1633 case 244:
1634 return llvm::bit_cast<uint64_t>(2.0);
1635 case 245:
1636 return llvm::bit_cast<uint64_t>(-2.0);
1637 case 246:
1638 return llvm::bit_cast<uint64_t>(4.0);
1639 case 247:
1640 return llvm::bit_cast<uint64_t>(-4.0);
1641 case 248: // 1 / (2 * PI)
1642 return 0x3fc45f306dc9c882;
1643 default:
1644 llvm_unreachable("invalid fp inline imm");
1645 }
1646}
1647
1648static int64_t getInlineImmValF16(unsigned Imm) {
1649 switch (Imm) {
1650 case 240:
1651 return 0x3800;
1652 case 241:
1653 return 0xB800;
1654 case 242:
1655 return 0x3C00;
1656 case 243:
1657 return 0xBC00;
1658 case 244:
1659 return 0x4000;
1660 case 245:
1661 return 0xC000;
1662 case 246:
1663 return 0x4400;
1664 case 247:
1665 return 0xC400;
1666 case 248: // 1 / (2 * PI)
1667 return 0x3118;
1668 default:
1669 llvm_unreachable("invalid fp inline imm");
1670 }
1671}
1672
1673static int64_t getInlineImmValBF16(unsigned Imm) {
1674 switch (Imm) {
1675 case 240:
1676 return 0x3F00;
1677 case 241:
1678 return 0xBF00;
1679 case 242:
1680 return 0x3F80;
1681 case 243:
1682 return 0xBF80;
1683 case 244:
1684 return 0x4000;
1685 case 245:
1686 return 0xC000;
1687 case 246:
1688 return 0x4080;
1689 case 247:
1690 return 0xC080;
1691 case 248: // 1 / (2 * PI)
1692 return 0x3E22;
1693 default:
1694 llvm_unreachable("invalid fp inline imm");
1695 }
1696}
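// All four tables encode the same constants (+-0.5, +-1.0, +-2.0, +-4.0, plus
// 1/(2*pi) for encoding 248); only the bit pattern of the operand's format
// differs.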
1697
1698unsigned AMDGPUDisassembler::getVgprClassId(unsigned Width) const {
1699 using namespace AMDGPU;
1700
1701 switch (Width) {
1702 case 16:
1703 case 32:
1704 return VGPR_32RegClassID;
1705 case 64:
1706 return VReg_64RegClassID;
1707 case 96:
1708 return VReg_96RegClassID;
1709 case 128:
1710 return VReg_128RegClassID;
1711 case 160:
1712 return VReg_160RegClassID;
1713 case 192:
1714 return VReg_192RegClassID;
1715 case 256:
1716 return VReg_256RegClassID;
1717 case 288:
1718 return VReg_288RegClassID;
1719 case 320:
1720 return VReg_320RegClassID;
1721 case 352:
1722 return VReg_352RegClassID;
1723 case 384:
1724 return VReg_384RegClassID;
1725 case 512:
1726 return VReg_512RegClassID;
1727 case 1024:
1728 return VReg_1024RegClassID;
1729 }
1730 llvm_unreachable("Invalid register width!");
1731}
1732
1733unsigned AMDGPUDisassembler::getAgprClassId(unsigned Width) const {
1734 using namespace AMDGPU;
1735
1736 switch (Width) {
1737 case 16:
1738 case 32:
1739 return AGPR_32RegClassID;
1740 case 64:
1741 return AReg_64RegClassID;
1742 case 96:
1743 return AReg_96RegClassID;
1744 case 128:
1745 return AReg_128RegClassID;
1746 case 160:
1747 return AReg_160RegClassID;
1748 case 256:
1749 return AReg_256RegClassID;
1750 case 288:
1751 return AReg_288RegClassID;
1752 case 320:
1753 return AReg_320RegClassID;
1754 case 352:
1755 return AReg_352RegClassID;
1756 case 384:
1757 return AReg_384RegClassID;
1758 case 512:
1759 return AReg_512RegClassID;
1760 case 1024:
1761 return AReg_1024RegClassID;
1762 }
1763 llvm_unreachable("Invalid register width!");
1764}
1765
1766unsigned AMDGPUDisassembler::getSgprClassId(unsigned Width) const {
1767 using namespace AMDGPU;
1768
1769 switch (Width) {
1770 case 16:
1771 case 32:
1772 return SGPR_32RegClassID;
1773 case 64:
1774 return SGPR_64RegClassID;
1775 case 96:
1776 return SGPR_96RegClassID;
1777 case 128:
1778 return SGPR_128RegClassID;
1779 case 160:
1780 return SGPR_160RegClassID;
1781 case 256:
1782 return SGPR_256RegClassID;
1783 case 288:
1784 return SGPR_288RegClassID;
1785 case 320:
1786 return SGPR_320RegClassID;
1787 case 352:
1788 return SGPR_352RegClassID;
1789 case 384:
1790 return SGPR_384RegClassID;
1791 case 512:
1792 return SGPR_512RegClassID;
1793 }
1794 llvm_unreachable("Invalid register width!");
1795}
1796
1797unsigned AMDGPUDisassembler::getTtmpClassId(unsigned Width) const {
1798 using namespace AMDGPU;
1799
1800 switch (Width) {
1801 case 16:
1802 case 32:
1803 return TTMP_32RegClassID;
1804 case 64:
1805 return TTMP_64RegClassID;
1806 case 128:
1807 return TTMP_128RegClassID;
1808 case 256:
1809 return TTMP_256RegClassID;
1810 case 288:
1811 return TTMP_288RegClassID;
1812 case 320:
1813 return TTMP_320RegClassID;
1814 case 352:
1815 return TTMP_352RegClassID;
1816 case 384:
1817 return TTMP_384RegClassID;
1818 case 512:
1819 return TTMP_512RegClassID;
1820 }
1821 llvm_unreachable("Invalid register width!");
1822}
1823
1824int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1825 using namespace AMDGPU::EncValues;
1826
1827 unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1828 unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1829
1830 return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1831}
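// Annotation (editorial): getTTmpIdx maps a raw source-operand encoding to a
// zero-based trap-temporary index, or -1 when the value lies outside the ttmp
// window. E.g. on GFX9+, Val == TTMP_GFX9PLUS_MIN + 2 decodes to ttmp2.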
1832
1833MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const {
1834 using namespace AMDGPU::EncValues;
1835
1836 assert(Val < 1024); // enum10
1837
1838 bool IsAGPR = Val & 512;
1839 Val &= 511;
1840
1841 if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1842 return createRegOperand(IsAGPR ? getAgprClassId(Width)
1843 : getVgprClassId(Width), Val - VGPR_MIN);
1844 }
1845 return decodeNonVGPRSrcOp(Width, Val & 0xFF);
1846}
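// Annotation (editorial): decodeSrcOp handles the 10-bit ("enum10") source
// encoding: bit 9 selects AGPRs, bits 8..0 select the register. For example,
// Val == VGPR_MIN + 4 decodes to v4 and Val == 512 + VGPR_MIN + 4 to a4,
// while values below VGPR_MIN fall through to decodeNonVGPRSrcOp with the
// encoding truncated to 8 bits.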
1847
1847
1848MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width,
1849 unsigned Val) const {
1850 // Cases when Val{8} is 1 (vgpr, agpr or true 16 vgpr) should have been
1851 // decoded earlier.
1852 assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
1853 using namespace AMDGPU::EncValues;
1854
1855 if (Val <= SGPR_MAX) {
1856 // "SGPR_MIN <= Val" is always true and causes compilation warning.
1857 static_assert(SGPR_MIN == 0);
1858 return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1859 }
1860
1861 int TTmpIdx = getTTmpIdx(Val);
1862 if (TTmpIdx >= 0) {
1863 return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1864 }
1865
1866 if ((INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX) ||
1867 (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX) ||
1868 Val == LITERAL_CONST)
1869 return MCOperand::createImm(Val);
1870
1871 if (Val == LITERAL64_CONST && STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
1872 return decodeLiteral64Constant();
1873 }
1874
1875 switch (Width) {
1876 case 32:
1877 case 16:
1878 return decodeSpecialReg32(Val);
1879 case 64:
1880 return decodeSpecialReg64(Val);
1881 case 96:
1882 case 128:
1883 case 256:
1884 case 512:
1885 return decodeSpecialReg96Plus(Val);
1886 default:
1887 llvm_unreachable("unexpected immediate type");
1888 }
1889}
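// Annotation (editorial): decode priority above is SGPRs, then trap temps,
// then the inline-constant and literal markers (returned as raw immediates
// and interpreted downstream), then the width-dependent special registers.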
1890
1891// Bit 0 of DstY isn't stored in the instruction, because it's always the
1892// opposite of bit 0 of DstX.
1893MCOperand AMDGPUDisassembler::decodeVOPDDstYOp(MCInst &Inst,
1894 unsigned Val) const {
1895 int VDstXInd =
1896 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
1897 assert(VDstXInd != -1);
1898 assert(Inst.getOperand(VDstXInd).isReg());
1899 unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
1900 Val |= ~XDstReg & 1;
1901 return createRegOperand(getVgprClassId(32), Val);
1902}
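// Annotation (editorial): worked example of the parity rule: if vdstX encodes
// an even VGPR such as v4, then ~XDstReg & 1 == 1 and bit 0 of vdstY is
// forced on, so a stored Val of 6 decodes to v7.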
1903
1904MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1905 using namespace AMDGPU;
1906
1907 switch (Val) {
1908 // clang-format off
1909 case 102: return createRegOperand(FLAT_SCR_LO);
1910 case 103: return createRegOperand(FLAT_SCR_HI);
1911 case 104: return createRegOperand(XNACK_MASK_LO);
1912 case 105: return createRegOperand(XNACK_MASK_HI);
1913 case 106: return createRegOperand(VCC_LO);
1914 case 107: return createRegOperand(VCC_HI);
1915 case 108: return createRegOperand(TBA_LO);
1916 case 109: return createRegOperand(TBA_HI);
1917 case 110: return createRegOperand(TMA_LO);
1918 case 111: return createRegOperand(TMA_HI);
1919 case 124:
1920 return isGFX11Plus() ? createRegOperand(SGPR_NULL) : createRegOperand(M0);
1921 case 125:
1922 return isGFX11Plus() ? createRegOperand(M0) : createRegOperand(SGPR_NULL);
1923 case 126: return createRegOperand(EXEC_LO);
1924 case 127: return createRegOperand(EXEC_HI);
1925 case 230: return createRegOperand(SRC_FLAT_SCRATCH_BASE_LO);
1926 case 231: return createRegOperand(SRC_FLAT_SCRATCH_BASE_HI);
1927 case 235: return createRegOperand(SRC_SHARED_BASE_LO);
1928 case 236: return createRegOperand(SRC_SHARED_LIMIT_LO);
1929 case 237: return createRegOperand(SRC_PRIVATE_BASE_LO);
1930 case 238: return createRegOperand(SRC_PRIVATE_LIMIT_LO);
1931 case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1932 case 251: return createRegOperand(SRC_VCCZ);
1933 case 252: return createRegOperand(SRC_EXECZ);
1934 case 253: return createRegOperand(SRC_SCC);
1935 case 254: return createRegOperand(LDS_DIRECT);
1936 default: break;
1937 // clang-format on
1938 }
1939 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1940}
1941
1942MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1943 using namespace AMDGPU;
1944
1945 switch (Val) {
1946 case 102: return createRegOperand(FLAT_SCR);
1947 case 104: return createRegOperand(XNACK_MASK);
1948 case 106: return createRegOperand(VCC);
1949 case 108: return createRegOperand(TBA);
1950 case 110: return createRegOperand(TMA);
1951 case 124:
1952 if (isGFX11Plus())
1953 return createRegOperand(SGPR_NULL);
1954 break;
1955 case 125:
1956 if (!isGFX11Plus())
1957 return createRegOperand(SGPR_NULL);
1958 break;
1959 case 126: return createRegOperand(EXEC);
1960 case 230: return createRegOperand(SRC_FLAT_SCRATCH_BASE_LO);
1961 case 235: return createRegOperand(SRC_SHARED_BASE);
1962 case 236: return createRegOperand(SRC_SHARED_LIMIT);
1963 case 237: return createRegOperand(SRC_PRIVATE_BASE);
1964 case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1965 case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1966 case 251: return createRegOperand(SRC_VCCZ);
1967 case 252: return createRegOperand(SRC_EXECZ);
1968 case 253: return createRegOperand(SRC_SCC);
1969 default: break;
1970 }
1971 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1972}
1973
1974MCOperand AMDGPUDisassembler::decodeSpecialReg96Plus(unsigned Val) const {
1975 using namespace AMDGPU;
1976
1977 switch (Val) {
1978 case 124:
1979 if (isGFX11Plus())
1980 return createRegOperand(SGPR_NULL);
1981 break;
1982 case 125:
1983 if (!isGFX11Plus())
1984 return createRegOperand(SGPR_NULL);
1985 break;
1986 default:
1987 break;
1988 }
1989 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1990}
1991
1992MCOperand AMDGPUDisassembler::decodeSDWASrc(unsigned Width,
1993 const unsigned Val) const {
1994 using namespace AMDGPU::SDWA;
1995 using namespace AMDGPU::EncValues;
1996
1997 if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
1998 STI.hasFeature(AMDGPU::FeatureGFX10)) {
1999 // XXX: the cast to int avoids a spurious warning:
2000 // comparison with unsigned is always true
2001 if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
2002 Val <= SDWA9EncValues::SRC_VGPR_MAX) {
2003 return createRegOperand(getVgprClassId(Width),
2004 Val - SDWA9EncValues::SRC_VGPR_MIN);
2005 }
2006 if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
2007 Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
2008 : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
2009 return createSRegOperand(getSgprClassId(Width),
2010 Val - SDWA9EncValues::SRC_SGPR_MIN);
2011 }
2012 if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
2013 Val <= SDWA9EncValues::SRC_TTMP_MAX) {
2014 return createSRegOperand(getTtmpClassId(Width),
2015 Val - SDWA9EncValues::SRC_TTMP_MIN);
2016 }
2017
2018 const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
2019
2020 if ((INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX) ||
2021 (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX))
2022 return MCOperand::createImm(SVal);
2023
2024 return decodeSpecialReg32(SVal);
2025 }
2026 if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
2027 return createRegOperand(getVgprClassId(Width), Val);
2028 llvm_unreachable("unsupported target");
2029}
2030
2031MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
2032 return decodeSDWASrc(16, Val);
2033}
2034
2035MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
2036 return decodeSDWASrc(32, Val);
2037}
2038
2039MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
2040 using namespace AMDGPU::SDWA;
2041
2042 assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
2043 STI.hasFeature(AMDGPU::FeatureGFX10)) &&
2044 "SDWAVopcDst should be present only on GFX9+");
2045
2046 bool IsWave32 = STI.hasFeature(AMDGPU::FeatureWavefrontSize32);
2047
2048 if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
2049 Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
2050
2051 int TTmpIdx = getTTmpIdx(Val);
2052 if (TTmpIdx >= 0) {
2053 auto TTmpClsId = getTtmpClassId(IsWave32 ? 32 : 64);
2054 return createSRegOperand(TTmpClsId, TTmpIdx);
2055 }
2056 if (Val > SGPR_MAX) {
2057 return IsWave32 ? decodeSpecialReg32(Val) : decodeSpecialReg64(Val);
2058 }
2059 return createSRegOperand(getSgprClassId(IsWave32 ? 32 : 64), Val);
2060 }
2061 return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
2062}
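// Annotation (editorial): the wavefront size decides the condition-register
// width here: wave32 decodes to a single 32-bit SGPR (or VCC_LO), wave64 to a
// 64-bit pair (or VCC), hence getSgprClassId(IsWave32 ? 32 : 64) above.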
2063
2064MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
2065 return STI.hasFeature(AMDGPU::FeatureWavefrontSize32) ? decodeSrcOp(32, Val)
2066 : decodeSrcOp(64, Val);
2067}
2068
2069MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
2070 return decodeSrcOp(32, Val);
2071}
2072
2073MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
2074 if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1)
2075 return MCOperand();
2076 return MCOperand::createImm(Val);
2077}
2078
2079MCOperand AMDGPUDisassembler::decodeVersionImm(unsigned Imm) const {
2080 using VersionField = AMDGPU::EncodingField<7, 0>;
2081 using W64Bit = AMDGPU::EncodingBit<13>;
2082 using W32Bit = AMDGPU::EncodingBit<14>;
2083 using MDPBit = AMDGPU::EncodingBit<15>;
2084 using Encoding = AMDGPU::EncodingFields<VersionField, W64Bit, W32Bit, MDPBit>;
2085
2086 auto [Version, W64, W32, MDP] = Encoding::decode(Imm);
2087
2088 // Decode into a plain immediate if any unused bits are raised.
2089 if (Encoding::encode(Version, W64, W32, MDP) != Imm)
2090 return MCOperand::createImm(Imm);
2091
2092 const auto &Versions = AMDGPU::UCVersion::getGFXVersions();
2093 const auto *I = find_if(
2094 Versions, [Version = Version](const AMDGPU::UCVersion::GFXVersion &V) {
2095 return V.Code == Version;
2096 });
2097 MCContext &Ctx = getContext();
2098 const MCExpr *E;
2099 if (I == Versions.end())
2100 E = MCConstantExpr::create(Version, Ctx);
2101 else
2102 E = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(I->Symbol), Ctx);
2103
2104 if (W64)
2105 E = MCBinaryExpr::createOr(E, UCVersionW64Expr, Ctx);
2106 if (W32)
2107 E = MCBinaryExpr::createOr(E, UCVersionW32Expr, Ctx);
2108 if (MDP)
2109 E = MCBinaryExpr::createOr(E, UCVersionMDPExpr, Ctx);
2110
2111 return MCOperand::createExpr(E);
2112}
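// Annotation (editorial): s_version immediates print symbolically when
// possible, e.g. a known version code with bit 14 raised becomes the
// expression <version symbol> | UC_VERSION_W32_BIT; unknown codes fall back
// to a plain constant, and any stray bits force a raw immediate instead.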
2113
2114bool AMDGPUDisassembler::isVI() const {
2115 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2116}
2117
2118bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
2119
2120bool AMDGPUDisassembler::isGFX90A() const {
2121 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2122}
2123
2124bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
2125
2126bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
2127
2128bool AMDGPUDisassembler::isGFX10Plus() const {
2129 return AMDGPU::isGFX10Plus(STI);
2130}
2131
2132bool AMDGPUDisassembler::isGFX11() const {
2133 return STI.hasFeature(AMDGPU::FeatureGFX11);
2134}
2135
2136bool AMDGPUDisassembler::isGFX11Plus() const {
2137 return AMDGPU::isGFX11Plus(STI);
2138}
2139
2140bool AMDGPUDisassembler::isGFX12() const {
2141 return STI.hasFeature(AMDGPU::FeatureGFX12);
2142}
2143
2144bool AMDGPUDisassembler::isGFX12Plus() const {
2145 return AMDGPU::isGFX12Plus(STI);
2146}
2147
2148bool AMDGPUDisassembler::isGFX1250() const { return AMDGPU::isGFX1250(STI); }
2149
2150bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
2151 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2152}
2153
2154bool AMDGPUDisassembler::hasKernargPreload() const {
2155 return AMDGPU::hasKernargPreload(STI);
2156}
2157
2158//===----------------------------------------------------------------------===//
2159// AMDGPU specific symbol handling
2160//===----------------------------------------------------------------------===//
2161
2162/// Print a string describing the reserved bit range specified by Mask with
2163/// offset BaseBytes for use in error comments. Mask is a single continuous
2164/// range of 1s surrounded by zeros. The format here is meant to align with the
2165/// tables that describe these bits in llvm.org/docs/AMDGPUUsage.html.
2166static SmallString<32> getBitRangeFromMask(uint32_t Mask, unsigned BaseBytes) {
2167 SmallString<32> Result;
2168 raw_svector_ostream S(Result);
2169
2170 int TrailingZeros = llvm::countr_zero(Mask);
2171 int PopCount = llvm::popcount(Mask);
2172
2173 if (PopCount == 1) {
2174 S << "bit (" << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
2175 } else {
2176 S << "bits in range ("
2177 << (TrailingZeros + PopCount - 1 + BaseBytes * CHAR_BIT) << ':'
2178 << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
2179 }
2180
2181 return Result;
2182}
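// Annotation (editorial): examples of the format produced above, matching the
// AMDGPUUsage tables: Mask = 0x1 with BaseBytes = 0 yields "bit (0)", while
// Mask = 0xC0 with BaseBytes = 4 yields "bits in range (39:38)".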
2183
2184#define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
2185#define PRINT_DIRECTIVE(DIRECTIVE, MASK) \
2186 do { \
2187 KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n'; \
2188 } while (0)
2189#define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK) \
2190 do { \
2191 KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " " \
2192 << GET_FIELD(MASK) << '\n'; \
2193 } while (0)
2194
2195#define CHECK_RESERVED_BITS_IMPL(MASK, DESC, MSG) \
2196 do { \
2197 if (FourByteBuffer & (MASK)) { \
2198 return createStringError(std::errc::invalid_argument, \
2199 "kernel descriptor " DESC \
2200 " reserved %s set" MSG, \
2201 getBitRangeFromMask((MASK), 0).c_str()); \
2202 } \
2203 } while (0)
2204
2205#define CHECK_RESERVED_BITS(MASK) CHECK_RESERVED_BITS_IMPL(MASK, #MASK, "")
2206#define CHECK_RESERVED_BITS_MSG(MASK, MSG) \
2207 CHECK_RESERVED_BITS_IMPL(MASK, #MASK, ", " MSG)
2208#define CHECK_RESERVED_BITS_DESC(MASK, DESC) \
2209 CHECK_RESERVED_BITS_IMPL(MASK, DESC, "")
2210#define CHECK_RESERVED_BITS_DESC_MSG(MASK, DESC, MSG) \
2211 CHECK_RESERVED_BITS_IMPL(MASK, DESC, ", " MSG)
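// Annotation (editorial): for instance CHECK_RESERVED_BITS_MSG(MASK, "must be
// zero") expands to CHECK_RESERVED_BITS_IMPL(MASK, "MASK", ", must be zero"),
// so a raised reserved bit produces an error such as
// "kernel descriptor MASK reserved bit (N) set, must be zero".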
2212
2213// NOLINTNEXTLINE(readability-identifier-naming)
2214Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
2215 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2216 using namespace amdhsa;
2217 StringRef Indent = "\t";
2218
2219 // We cannot accurately backward compute #VGPRs used from
2220 // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
2221 // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
2222 // simply calculate the inverse of what the assembler does.
2223
2224 uint32_t GranulatedWorkitemVGPRCount =
2225 GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
2226
2227 uint32_t NextFreeVGPR =
2228 (GranulatedWorkitemVGPRCount + 1) *
2229 AMDGPU::IsaInfo::getVGPREncodingGranule(&STI, EnableWavefrontSize32);
2230
2231 KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
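 // Annotation (editorial): illustrative round trip, assuming an encoding
 // granule of 4 (e.g. wave64): GRANULATED_WORKITEM_VGPR_COUNT == 3 prints
 // ".amdhsa_next_free_vgpr 16", and assembling 16 VGPRs yields 16/4 - 1 == 3
 // again, so the field survives a disassemble/reassemble cycle.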
2232
2233 // We cannot backward compute values used to calculate
2234 // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for following
2235 // directives can't be computed:
2236 // .amdhsa_reserve_vcc
2237 // .amdhsa_reserve_flat_scratch
2238 // .amdhsa_reserve_xnack_mask
2239 // They take their respective default values if not specified in the assembly.
2240 //
2241 // GRANULATED_WAVEFRONT_SGPR_COUNT
2242 // = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
2243 //
2244 // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
2245 // are set to 0. So while disassembling we consider that:
2246 //
2247 // GRANULATED_WAVEFRONT_SGPR_COUNT
2248 // = f(NEXT_FREE_SGPR + 0 + 0 + 0)
2249 //
2250 // The disassembler cannot recover the original values of those 3 directives.
2251
2252 uint32_t GranulatedWavefrontSGPRCount =
2253 GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
2254
2255 if (isGFX10Plus())
2256 CHECK_RESERVED_BITS_MSG(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2257 "must be zero on gfx10+");
2258
2259 uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
2260 AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
2261
2262 KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
2263 if (!hasArchitectedFlatScratch())
2264 KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
2265 KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
2266 KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";
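 // Annotation (editorial): same inversion for SGPRs with an assumed encoding
 // granule of 8: a granulated count of 12 prints ".amdhsa_next_free_sgpr 104".
 // The vcc/flat_scratch/xnack reservations cannot be recovered, hence the
 // explicit zeros above.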
2267
2268 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY);
2269
2270 PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
2271 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
2272 PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
2273 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
2274 PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
2275 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
2276 PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
2277 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
2278
2279 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIV);
2280
2281 if (!isGFX12Plus())
2282 PRINT_DIRECTIVE(".amdhsa_dx10_clamp",
2283 COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
2284
2285 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_DEBUG_MODE);
2286
2287 if (!isGFX12Plus())
2288 PRINT_DIRECTIVE(".amdhsa_ieee_mode",
2289 COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
2290
2291 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_BULKY);
2292 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_CDBG_USER);
2293
2294 // Bits [26].
2295 if (isGFX9Plus()) {
2296 PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
2297 } else {
2298 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX8_RESERVED0,
2299 "COMPUTE_PGM_RSRC1", "must be zero pre-gfx9");
2300 }
2301
2302 // Bits [27].
2303 if (isGFX1250()) {
2304 PRINT_PSEUDO_DIRECTIVE_COMMENT("FLAT_SCRATCH_IS_NV",
2305 COMPUTE_PGM_RSRC1_GFX125_FLAT_SCRATCH_IS_NV);
2306 } else {
2307 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_GFX6_GFX120_RESERVED1,
2308 "COMPUTE_PGM_RSRC1");
2309 }
2310
2311 // Bits [28].
2312 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_RESERVED2, "COMPUTE_PGM_RSRC1");
2313
2314 // Bits [29-31].
2315 if (isGFX10Plus()) {
2316 // WGP_MODE is not available on GFX1250.
2317 if (!isGFX1250()) {
2318 PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
2319 COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
2320 }
2321 PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
2322 PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
2323 } else {
2324 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_GFX6_GFX9_RESERVED3,
2325 "COMPUTE_PGM_RSRC1");
2326 }
2327
2328 if (isGFX12Plus())
2329 PRINT_DIRECTIVE(".amdhsa_round_robin_scheduling",
2330 COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
2331
2332 return true;
2333}
2334
2335// NOLINTNEXTLINE(readability-identifier-naming)
2336Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
2337 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2338 using namespace amdhsa;
2339 StringRef Indent = "\t";
2340 if (isGFX12Plus())
2341 PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
2342 COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2343 else
2344 PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
2345 COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2346 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
2347 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
2348 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
2349 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
2350 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
2351 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
2352 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
2353 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
2354 PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
2355 COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
2356
2357 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH);
2358 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY);
2359 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE);
2360
2362 ".amdhsa_exception_fp_ieee_invalid_op",
2363 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
2364 PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
2365 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
2367 ".amdhsa_exception_fp_ieee_div_zero",
2368 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
2369 PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
2370 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
2371 PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
2372 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
2373 PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
2374 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
2375 PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
2376 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
2377
2378 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC2_RESERVED0, "COMPUTE_PGM_RSRC2");
2379
2380 return true;
2381}
2382
2383// NOLINTNEXTLINE(readability-identifier-naming)
2384Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC3(
2385 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2386 using namespace amdhsa;
2387 StringRef Indent = "\t";
2388 if (isGFX90A()) {
2389 KdStream << Indent << ".amdhsa_accum_offset "
2390 << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
2391 << '\n';
2392
2393 PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
2394
2395 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED0,
2396 "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2397 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED1,
2398 "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2399 } else if (isGFX10Plus()) {
2400 // Bits [0-3].
2401 if (!isGFX12Plus()) {
2402 if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
2403 PRINT_DIRECTIVE(".amdhsa_shared_vgpr_count",
2404 COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2405 } else {
2407 "SHARED_VGPR_COUNT",
2408 COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2409 }
2410 } else {
2411 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX12_PLUS_RESERVED0,
2412 "COMPUTE_PGM_RSRC3",
2413 "must be zero on gfx12+");
2414 }
2415
2416 // Bits [4-11].
2417 if (isGFX11()) {
2418 PRINT_DIRECTIVE(".amdhsa_inst_pref_size",
2419 COMPUTE_PGM_RSRC3_GFX11_INST_PREF_SIZE);
2420 PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_START",
2421 COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_START);
2422 PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_END",
2423 COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_END);
2424 } else if (isGFX12Plus()) {
2425 PRINT_DIRECTIVE(".amdhsa_inst_pref_size",
2426 COMPUTE_PGM_RSRC3_GFX12_PLUS_INST_PREF_SIZE);
2427 } else {
2428 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED1,
2429 "COMPUTE_PGM_RSRC3",
2430 "must be zero on gfx10");
2431 }
2432
2433 // Bits [12].
2434 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED2,
2435 "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2436
2437 // Bits [13].
2438 if (isGFX12Plus()) {
2439 PRINT_PSEUDO_DIRECTIVE_COMMENT("GLG_EN",
2440 COMPUTE_PGM_RSRC3_GFX12_PLUS_GLG_EN);
2441 } else {
2442 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX11_RESERVED3,
2443 "COMPUTE_PGM_RSRC3",
2444 "must be zero on gfx10 or gfx11");
2445 }
2446
2447 // Bits [14-21].
2448 if (isGFX1250()) {
2449 PRINT_DIRECTIVE(".amdhsa_named_barrier_count",
2450 COMPUTE_PGM_RSRC3_GFX125_NAMED_BAR_CNT);
2452 "ENABLE_DYNAMIC_VGPR", COMPUTE_PGM_RSRC3_GFX125_ENABLE_DYNAMIC_VGPR);
2454 COMPUTE_PGM_RSRC3_GFX125_TCP_SPLIT);
2456 "ENABLE_DIDT_THROTTLE",
2457 COMPUTE_PGM_RSRC3_GFX125_ENABLE_DIDT_THROTTLE);
2458 } else {
2459 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX120_RESERVED4,
2460 "COMPUTE_PGM_RSRC3",
2461 "must be zero on gfx10+");
2462 }
2463
2464 // Bits [22-30].
2465 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED5,
2466 "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2467
2468 // Bits [31].
2469 if (isGFX11Plus()) {
2470 PRINT_PSEUDO_DIRECTIVE_COMMENT("IMAGE_OP",
2471 COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
2472 } else {
2473 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED6,
2474 "COMPUTE_PGM_RSRC3",
2475 "must be zero on gfx10");
2476 }
2477 } else if (FourByteBuffer) {
2478 return createStringError(
2479 std::errc::invalid_argument,
2480 "kernel descriptor COMPUTE_PGM_RSRC3 must be all zero before gfx9");
2481 }
2482 return true;
2483}
2484#undef PRINT_PSEUDO_DIRECTIVE_COMMENT
2485#undef PRINT_DIRECTIVE
2486#undef GET_FIELD
2487#undef CHECK_RESERVED_BITS_IMPL
2488#undef CHECK_RESERVED_BITS
2489#undef CHECK_RESERVED_BITS_MSG
2490#undef CHECK_RESERVED_BITS_DESC
2491#undef CHECK_RESERVED_BITS_DESC_MSG
2492
2493/// Create an error object to return from onSymbolStart for reserved kernel
2494/// descriptor bits being set.
2495static Error createReservedKDBitsError(uint32_t Mask, unsigned BaseBytes,
2496 const char *Msg = "") {
2497 return createStringError(
2498 std::errc::invalid_argument, "kernel descriptor reserved %s set%s%s",
2499 getBitRangeFromMask(Mask, BaseBytes).c_str(), *Msg ? ", " : "", Msg);
2500}
2501
2502/// Create an error object to return from onSymbolStart for reserved kernel
2503/// descriptor bytes being set.
2504static Error createReservedKDBytesError(unsigned BaseInBytes,
2505 unsigned WidthInBytes) {
2506 // Create an error comment in the same format as the "Kernel Descriptor"
2507 // table here: https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor .
2508 return createStringError(
2509 std::errc::invalid_argument,
2510 "kernel descriptor reserved bits in range (%u:%u) set",
2511 (BaseInBytes + WidthInBytes) * CHAR_BIT - 1, BaseInBytes * CHAR_BIT);
2512}
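// Annotation (editorial): e.g. createReservedKDBytesError(12, 4) reports
// "kernel descriptor reserved bits in range (127:96) set", matching the
// reserved 4-byte field at byte offset 12 of the descriptor.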
2513
2514Expected<bool> AMDGPUDisassembler::decodeKernelDescriptorDirective(
2515 DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
2516 raw_string_ostream &KdStream) const {
2517#define PRINT_DIRECTIVE(DIRECTIVE, MASK) \
2518 do { \
2519 KdStream << Indent << DIRECTIVE " " \
2520 << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n'; \
2521 } while (0)
2522
2523 uint16_t TwoByteBuffer = 0;
2524 uint32_t FourByteBuffer = 0;
2525
2526 StringRef ReservedBytes;
2527 StringRef Indent = "\t";
2528
2529 assert(Bytes.size() == 64);
2530 DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
2531
2532 switch (Cursor.tell()) {
2533 case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
2534 FourByteBuffer = DE.getU32(Cursor);
2535 KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
2536 << '\n';
2537 return true;
2538
2539 case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
2540 FourByteBuffer = DE.getU32(Cursor);
2541 KdStream << Indent << ".amdhsa_private_segment_fixed_size "
2542 << FourByteBuffer << '\n';
2543 return true;
2544
2545 case amdhsa::KERNARG_SIZE_OFFSET:
2546 FourByteBuffer = DE.getU32(Cursor);
2547 KdStream << Indent << ".amdhsa_kernarg_size "
2548 << FourByteBuffer << '\n';
2549 return true;
2550
2551 case amdhsa::RESERVED0_OFFSET:
2552 // 4 reserved bytes, must be 0.
2553 ReservedBytes = DE.getBytes(Cursor, 4);
2554 for (int I = 0; I < 4; ++I) {
2555 if (ReservedBytes[I] != 0)
2556 return createReservedKDBytesError(amdhsa::RESERVED0_OFFSET, 4);
2557 }
2558 return true;
2559
2560 case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
2561 // KERNEL_CODE_ENTRY_BYTE_OFFSET
2562 // So far no directive controls this for Code Object V3, so simply skip for
2563 // disassembly.
2564 DE.skip(Cursor, 8);
2565 return true;
2566
2567 case amdhsa::RESERVED1_OFFSET:
2568 // 20 reserved bytes, must be 0.
2569 ReservedBytes = DE.getBytes(Cursor, 20);
2570 for (int I = 0; I < 20; ++I) {
2571 if (ReservedBytes[I] != 0)
2572 return createReservedKDBytesError(amdhsa::RESERVED1_OFFSET, 20);
2573 }
2574 return true;
2575
2576 case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
2577 FourByteBuffer = DE.getU32(Cursor);
2578 return decodeCOMPUTE_PGM_RSRC3(FourByteBuffer, KdStream);
2579
2580 case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
2581 FourByteBuffer = DE.getU32(Cursor);
2582 return decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream);
2583
2584 case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
2585 FourByteBuffer = DE.getU32(Cursor);
2586 return decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream);
2587
2588 case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
2589 using namespace amdhsa;
2590 TwoByteBuffer = DE.getU16(Cursor);
2591
2593 PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
2594 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
2595 PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
2596 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
2597 PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
2598 KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
2599 PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
2600 KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
2601 PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
2602 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
2604 PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
2605 KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
2606 PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
2607 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
2608 if (isGFX1250())
2609 PRINT_DIRECTIVE(".amdhsa_uses_cu_stores",
2610 KERNEL_CODE_PROPERTY_USES_CU_STORES);
2611
2612 if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
2613 return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED0,
2614 amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2615
2616 // Reserved for GFX9
2617 if (isGFX9() &&
2618 (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
2619 return createReservedKDBitsError(
2620 KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
2621 amdhsa::KERNEL_CODE_PROPERTIES_OFFSET, "must be zero on gfx9");
2622 }
2623 if (isGFX10Plus()) {
2624 PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
2625 KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2626 }
2627
2628 if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
2629 PRINT_DIRECTIVE(".amdhsa_uses_dynamic_stack",
2630 KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
2631
2632 if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1) {
2633 return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED1,
2634 amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2635 }
2636
2637 return true;
2638
2639 case amdhsa::KERNARG_PRELOAD_OFFSET:
2640 using namespace amdhsa;
2641 TwoByteBuffer = DE.getU16(Cursor);
2642 if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
2643 PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_length",
2644 KERNARG_PRELOAD_SPEC_LENGTH);
2645 }
2646
2647 if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
2648 PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_offset",
2649 KERNARG_PRELOAD_SPEC_OFFSET);
2650 }
2651 return true;
2652
2653 case amdhsa::RESERVED3_OFFSET:
2654 // 4 bytes from here are reserved, must be 0.
2655 ReservedBytes = DE.getBytes(Cursor, 4);
2656 for (int I = 0; I < 4; ++I) {
2657 if (ReservedBytes[I] != 0)
2658 return createReservedKDBytesError(amdhsa::RESERVED3_OFFSET, 4);
2659 }
2660 return true;
2661
2662 default:
2663 llvm_unreachable("Unhandled index. Case statements cover everything.");
2664 return true;
2665 }
2666#undef PRINT_DIRECTIVE
2667}
2668
2669Expected<bool> AMDGPUDisassembler::decodeKernelDescriptor(
2670 StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
2671
2672 // CP microcode requires the kernel descriptor to be 64-byte aligned.
2673 if (Bytes.size() != 64 || KdAddress % 64 != 0)
2674 return createStringError(std::errc::invalid_argument,
2675 "kernel descriptor must be 64-byte aligned");
2676
2677 // FIXME: We can't actually decode "in order" as is done below, as e.g. GFX10
2678 // requires us to know the setting of .amdhsa_wavefront_size32 in order to
2679 // accurately produce .amdhsa_next_free_vgpr, and they appear in the wrong
2680 // order. Work around this by first looking up .amdhsa_wavefront_size32 here
2681 // when required.
2682 if (isGFX10Plus()) {
2683 uint16_t KernelCodeProperties =
2684 support::endian::read16(&Bytes[amdhsa::KERNEL_CODE_PROPERTIES_OFFSET],
2685 llvm::endianness::little);
2686 EnableWavefrontSize32 =
2687 AMDHSA_BITS_GET(KernelCodeProperties,
2688 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2689 }
2690
2691 std::string Kd;
2692 raw_string_ostream KdStream(Kd);
2693 KdStream << ".amdhsa_kernel " << KdName << '\n';
2694
2696 while (C && C.tell() < Bytes.size()) {
2697 Expected<bool> Res = decodeKernelDescriptorDirective(C, Bytes, KdStream);
2698
2699 cantFail(C.takeError());
2700
2701 if (!Res)
2702 return Res;
2703 }
2704 KdStream << ".end_amdhsa_kernel\n";
2705 outs() << KdStream.str();
2706 return true;
2707}
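// Annotation (editorial): the emitted text is an assemblable block of the form
//   .amdhsa_kernel <name>
//     .amdhsa_group_segment_fixed_size ...
//     ...
//   .end_amdhsa_kernel
// so a decoded kernel descriptor can round-trip through the assembler.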
2708
2710 uint64_t &Size,
2711 ArrayRef<uint8_t> Bytes,
2712 uint64_t Address) const {
2713 // Right now only the kernel descriptor needs to be handled.
2714 // We ignore all other symbols for target specific handling.
2715 // TODO:
2716 // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
2717 // Object V2 and V3 when symbols are marked protected.
2718
2719 // amd_kernel_code_t for Code Object V2.
2720 if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
2721 Size = 256;
2722 return createStringError(std::errc::invalid_argument,
2723 "code object v2 is not supported");
2724 }
2725
2726 // Code Object V3 kernel descriptors.
2727 StringRef Name = Symbol.Name;
2728 if (Symbol.Type == ELF::STT_OBJECT && Name.ends_with(StringRef(".kd"))) {
2729 Size = 64; // Size = 64 regardless of success or failure.
2730 return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
2731 }
2732
2733 return false;
2734}
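// Annotation (editorial): e.g. an STT_OBJECT symbol named "foo.kd" consumes
// 64 bytes and is decoded via decodeKernelDescriptor("foo", ...), while an
// STT_AMDGPU_HSA_KERNEL symbol (code object v2) is rejected with an error.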
2735
2736const MCExpr *AMDGPUDisassembler::createConstantSymbolExpr(StringRef Id,
2737 int64_t Val) {
2738 MCContext &Ctx = getContext();
2739 MCSymbol *Sym = Ctx.getOrCreateSymbol(Id);
2740 // Note: only set the value on a newly created symbol, in case another
2741 // disassembler has already been initialized in this context.
2742 if (!Sym->isVariable()) {
2743 Sym->setVariableValue(MCConstantExpr::create(Val, Ctx));
2744 } else {
2745 int64_t Res = ~Val;
2746 bool Valid = Sym->getVariableValue()->evaluateAsAbsolute(Res);
2747 if (!Valid || Res != Val)
2748 Ctx.reportWarning(SMLoc(), "unsupported redefinition of " + Id);
2749 }
2750 return MCSymbolRefExpr::create(Sym, Ctx);
2751}
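// Annotation (editorial): this lets several disassembler instances share one
// MCContext: the first instance defines each UC_VERSION_* symbol, and later
// instances merely re-check the value, warning on an inconsistent definition.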
2752
2753//===----------------------------------------------------------------------===//
2754// AMDGPUSymbolizer
2755//===----------------------------------------------------------------------===//
2756
2757// Try to find symbol name for specified label
2759 MCInst &Inst, raw_ostream & /*cStream*/, int64_t Value,
2760 uint64_t /*Address*/, bool IsBranch, uint64_t /*Offset*/,
2761 uint64_t /*OpSize*/, uint64_t /*InstSize*/) {
2762
2763 if (!IsBranch) {
2764 return false;
2765 }
2766
2767 auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
2768 if (!Symbols)
2769 return false;
2770
2771 auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
2772 return Val.Addr == static_cast<uint64_t>(Value) &&
2773 Val.Type == ELF::STT_NOTYPE;
2774 });
2775 if (Result != Symbols->end()) {
2776 auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
2777 const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
2778 Inst.addOperand(MCOperand::createExpr(Add));
2779 return true;
2780 }
2781 // Add to list of referenced addresses, so caller can synthesize a label.
2782 ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
2783 return false;
2784}
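// Annotation (editorial): branch targets that match a known STT_NOTYPE symbol
// become symbolic operands; unmatched targets are pushed onto
// ReferencedAddresses so the caller (e.g. llvm-objdump) can synthesize labels.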
2785
2786void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
2787 int64_t Value,
2788 uint64_t Address) {
2789 llvm_unreachable("unimplemented");
2790}
2791
2792//===----------------------------------------------------------------------===//
2793// Initialization
2794//===----------------------------------------------------------------------===//
2795
2796static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
2797 LLVMOpInfoCallback /*GetOpInfo*/,
2798 LLVMSymbolLookupCallback /*SymbolLookUp*/,
2799 void *DisInfo,
2800 MCContext *Ctx,
2801 std::unique_ptr<MCRelocationInfo> &&RelInfo) {
2802 return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
2803}
2804
2806 const MCSubtargetInfo &STI,
2807 MCContext &Ctx) {
2808 return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
2809}
2810
2811extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
2812LLVMInitializeAMDGPUDisassembler() {
2813 TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
2814 createAMDGPUDisassembler);
2815 TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
2816 createAMDGPUSymbolizer);
2817}