//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18
21#include "SIDefines.h"
22#include "SIRegisterInfo.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCDecoder.h"
32#include "llvm/MC/MCExpr.h"
33#include "llvm/MC/MCInstrDesc.h"
39
using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX                                                               \
  (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
                 : AMDGPU::EncValues::SGPR_MAX_SI)

static int64_t getInlineImmValF16(unsigned Imm);
static int64_t getInlineImmValBF16(unsigned Imm);
static int64_t getInlineImmVal32(unsigned Imm);
static int64_t getInlineImmVal64(unsigned Imm);
AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx, MCInstrInfo const *MCII)
    : MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
      MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
      CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
    reportFatalUsageError("disassembly not yet supported for subtarget");

  for (auto [Symbol, Code] : AMDGPU::UCVersion::getGFXVersions())
    createConstantSymbolExpr(Symbol, Code);

  UCVersionW64Expr = createConstantSymbolExpr("UC_VERSION_W64_BIT", 0x2000);
  UCVersionW32Expr = createConstantSymbolExpr("UC_VERSION_W32_BIT", 0x4000);
  UCVersionMDPExpr = createConstantSymbolExpr("UC_VERSION_MDP_BIT", 0x8000);
}

void AMDGPUDisassembler::setABIVersion(unsigned Version) {
  CodeObjectVersion = AMDGPU::getAMDHSACodeObjectVersion(Version);
}
static DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::Fail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                AMDGPU::OpName Name) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), Name);
  if (OpIdx != -1) {
    auto *I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}
static DecodeStatus decodeSOPPBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr,
                                       const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);

  // Our branches take a simm16.
  int64_t Offset = SignExtend64<16>(Imm) * 4 + 4 + Addr;

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}
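// Worked example for the offset computation above (illustrative, not from the
// original source): Imm = 0xFFF0 sign-extends to -16, giving
// Offset = (-16 * 4) + 4 + Addr, i.e. 60 bytes before the branch. The "+ 4"
// reflects that the hardware offset is relative to the instruction following
// the 4-byte SOPP word.
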
static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
                                     const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  int64_t Offset;
  if (DAsm->isGFX12Plus()) { // GFX12 supports 24-bit signed offsets.
    Offset = SignExtend64<24>(Imm);
  } else if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
    Offset = Imm & 0xFFFFF;
  } else { // GFX9+ supports 21-bit signed offsets.
    Offset = SignExtend64<21>(Imm);
  }
  return addOperand(Inst, MCOperand::createImm(Offset));
}
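// Worked example (illustrative): a raw field of 0x1FFFFF sign-extends to -1
// on GFX9+ (21-bit signed), is truncated to 0xFFFFF on VI (20-bit unsigned),
// and remains positive 0x1FFFFF under the 24-bit GFX12 scheme.
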
static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
                                  const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}

static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
                                       uint64_t Addr,
                                       const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
}

static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
                                 const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeDpp8FI(Val));
}
#define DECODE_OPERAND(StaticDecoderName, DecoderName)                         \
  static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm,            \
                                        uint64_t /*Addr*/,                     \
                                        const MCDisassembler *Decoder) {       \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
    return addOperand(Inst, DAsm->DecoderName(Imm));                           \
  }

// Decoder for registers, decoded directly via RegClassID. Imm (8-bit) is the
// register number. Used by VGPR-only and AGPR-only operands.
#define DECODE_OPERAND_REG_8(RegClass)                                         \
  static DecodeStatus Decode##RegClass##RegisterClass(                         \
      MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,                           \
      const MCDisassembler *Decoder) {                                         \
    assert(Imm < (1 << 8) && "8-bit encoding");                                \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
    return addOperand(                                                         \
        Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm));      \
  }
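
// For example (illustrative), DECODE_OPERAND_REG_8(VReg_64) expands to a
// DecodeVReg_64RegisterClass() callback in which an 8-bit field value N is
// mapped straight onto the N-th register of AMDGPU::VReg_64RegClassID.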

#define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm)                           \
  static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,      \
                           const MCDisassembler *Decoder) {                    \
    assert(Imm < (1 << EncSize) && #EncSize "-bit encoding");                  \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
    return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm));               \
  }
static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
                                unsigned OpWidth, unsigned Imm, unsigned EncImm,
                                const MCDisassembler *Decoder) {
  assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm));
}
// Decoder for registers. Imm (7-bit) is the register number; decodeSrcOp
// supplies the register class. Used by SGPR-only operands.
#define DECODE_OPERAND_SREG_7(RegClass, OpWidth)                               \
  DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm)

#define DECODE_OPERAND_SREG_8(RegClass, OpWidth)                               \
  DECODE_SrcOp(Decode##RegClass##RegisterClass, 8, OpWidth, Imm)
// Decoder for registers. Imm (10-bit): Imm{7-0} is the register number,
// Imm{9} is acc (AGPR or VGPR), and Imm{8} should be 0 (see VOP3Pe_SMFMAC).
// Set Imm{8} to 1 (IS_VGPR) to decode using 'enum10' from decodeSrcOp.
// Used by AV_ register classes (AGPR-or-VGPR-only register operands).
template <unsigned OpWidth>
static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
                               const MCDisassembler *Decoder) {
  return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm | AMDGPU::EncValues::IS_VGPR,
                     Decoder);
}
// Decoder for Src (9-bit encoding) registers only.
template <unsigned OpWidth>
static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
                                  uint64_t /* Addr */,
                                  const MCDisassembler *Decoder) {
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
}

// Decoder for Src (9-bit encoding) AGPRs: the register number is encoded in
// 9 bits. Set Imm{9} to 1 (set acc) and decode using 'enum10' from
// decodeSrcOp. Registers only.
template <unsigned OpWidth>
static DecodeStatus decodeSrcA9(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
                                const MCDisassembler *Decoder) {
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
}
// Decoder for 'enum10' from decodeSrcOp: Imm{0-8} is the 9-bit Src encoding
// and Imm{9} is acc. Registers only.
template <unsigned OpWidth>
static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
                                  uint64_t /* Addr */,
                                  const MCDisassembler *Decoder) {
  return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, Decoder);
}
// Decoder for RegisterOperands using 9-bit Src encoding. The operand can be a
// register from RegClass or an immediate. Registers that don't belong to
// RegClass will still be decoded, and the InstPrinter will report a warning.
// Immediates are decoded into a constant matching the OperandType (important
// for floating-point types).
template <unsigned OpWidth>
static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
                                       uint64_t /* Addr */,
                                       const MCDisassembler *Decoder) {
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
}

// Decoder for Src (9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set
// acc) and decode using 'enum10' from decodeSrcOp.
template <unsigned OpWidth>
static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
                                        uint64_t /* Addr */,
                                        const MCDisassembler *Decoder) {
  return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
}
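// A note on the encodings above (summary, not in the original source): bit 8
// of the source encoding selects the VGPR space and bit 9 the AGPR space, so
// OR-ing in 512 turns a plain register number into an AGPR reference, while
// values below 256 fall through to SGPRs, inline constants, or a literal
// marker in decodeSrcOp().
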
// Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
// when RegisterClass is used as an operand. Most often used for destination
// operands.

DECODE_OPERAND_REG_8(VGPR_32)
DECODE_OPERAND_REG_8(VGPR_32_Lo128)
DECODE_OPERAND_REG_8(VReg_64)
DECODE_OPERAND_REG_8(VReg_96)
DECODE_OPERAND_REG_8(VReg_128)
DECODE_OPERAND_REG_8(VReg_192)
DECODE_OPERAND_REG_8(VReg_256)
DECODE_OPERAND_REG_8(VReg_288)
DECODE_OPERAND_REG_8(VReg_320)
DECODE_OPERAND_REG_8(VReg_352)
DECODE_OPERAND_REG_8(VReg_384)
DECODE_OPERAND_REG_8(VReg_512)
DECODE_OPERAND_REG_8(VReg_1024)
DECODE_OPERAND_SREG_7(SReg_32, 32)
DECODE_OPERAND_SREG_7(SReg_32_XM0, 32)
DECODE_OPERAND_SREG_7(SReg_32_XEXEC, 32)
DECODE_OPERAND_SREG_7(SReg_32_XM0_XEXEC, 32)
DECODE_OPERAND_SREG_7(SReg_32_XEXEC_HI, 32)
DECODE_OPERAND_SREG_7(SReg_64_XEXEC, 64)
DECODE_OPERAND_SREG_7(SReg_64_XEXEC_XNULL, 64)
DECODE_OPERAND_SREG_7(SReg_96, 96)
DECODE_OPERAND_SREG_7(SReg_128, 128)
DECODE_OPERAND_SREG_7(SReg_128_XNULL, 128)
DECODE_OPERAND_SREG_7(SReg_256, 256)
DECODE_OPERAND_SREG_7(SReg_256_XNULL, 256)
DECODE_OPERAND_SREG_7(SReg_512, 512)

DECODE_OPERAND_SREG_8(SReg_64, 64)

DECODE_OPERAND_REG_8(AGPR_32)
DECODE_OPERAND_REG_8(AReg_64)
DECODE_OPERAND_REG_8(AReg_128)
DECODE_OPERAND_REG_8(AReg_256)
DECODE_OPERAND_REG_8(AReg_512)
DECODE_OPERAND_REG_8(AReg_1024)

static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
                                               uint64_t /*Addr*/,
                                               const MCDisassembler *Decoder) {
  assert(isUInt<10>(Imm) && "10-bit encoding expected");
  assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");

  bool IsHi = Imm & (1 << 9);
  unsigned RegIdx = Imm & 0xff;
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
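
// Worked example (illustrative): Imm = 0x205 has bit 9 set, so IsHi is true
// and RegIdx = 5; the operand becomes the high half v5.h, i.e. index 11
// (5 * 2 + 1) in the VGPR_16 register class.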
static DecodeStatus
DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
                                 const MCDisassembler *Decoder) {
  assert(isUInt<8>(Imm) && "8-bit encoding expected");

  bool IsHi = Imm & (1 << 7);
  unsigned RegIdx = Imm & 0x7f;
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
template <unsigned OpWidth>
static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
                                                uint64_t /*Addr*/,
                                                const MCDisassembler *Decoder) {
  assert(isUInt<9>(Imm) && "9-bit encoding expected");

  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  if (Imm & AMDGPU::EncValues::IS_VGPR) {
    bool IsHi = Imm & (1 << 7);
    unsigned RegIdx = Imm & 0x7f;
    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  }
  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
}
template <unsigned OpWidth>
static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
                                          uint64_t /*Addr*/,
                                          const MCDisassembler *Decoder) {
  assert(isUInt<10>(Imm) && "10-bit encoding expected");

  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  if (Imm & AMDGPU::EncValues::IS_VGPR) {
    bool IsHi = Imm & (1 << 9);
    unsigned RegIdx = Imm & 0xff;
    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  }
  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
}
static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
                                          uint64_t /*Addr*/,
                                          const MCDisassembler *Decoder) {
  assert(isUInt<10>(Imm) && "10-bit encoding expected");
  assert(Imm & AMDGPU::EncValues::IS_VGPR && "VGPR expected");

  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);

  bool IsHi = Imm & (1 << 9);
  unsigned RegIdx = Imm & 0xff;
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
                                         uint64_t /*Addr*/,
                                         const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));
}

static DecodeStatus decodeOperand_KImmFP64(MCInst &Inst, uint64_t Imm,
                                           uint64_t /*Addr*/,
                                           const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeMandatoryLiteral64Constant(Imm));
}

static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
                                          uint64_t Addr, const void *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));
}

static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
                          const MCRegisterInfo *MRI) {
  if (OpIdx < 0)
    return false;

  const MCOperand &Op = Inst.getOperand(OpIdx);
  if (!Op.isReg())
    return false;

  MCRegister Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  auto Reg = Sub ? Sub : Op.getReg();
  return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
}

static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm, unsigned Opw,
                                 const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  if (!DAsm->isGFX90A()) {
    Imm &= 511;
  } else {
    // If an atomic has both vdata and vdst, their register classes are tied.
    // The bit is decoded along with the vdst, first operand. We need to
    // change the register class to AGPR if vdst was an AGPR.
    // If a DS instruction has both data0 and data1, their register classes
    // are also tied.
    unsigned Opc = Inst.getOpcode();
    uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
    AMDGPU::OpName DataName = (TSFlags & SIInstrFlags::DS)
                                  ? AMDGPU::OpName::data0
                                  : AMDGPU::OpName::vdata;
    const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
    int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataName);
    if ((int)Inst.getNumOperands() == DataIdx) {
      int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (IsAGPROperand(Inst, DstIdx, MRI))
        Imm |= 512;
    }

    if (TSFlags & SIInstrFlags::DS) {
      int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
      if ((int)Inst.getNumOperands() == Data2Idx &&
          IsAGPROperand(Inst, DataIdx, MRI))
        Imm |= 512;
    }
  }
  return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
}
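
// A note on the OR with 256 above (not in the original source): it sets the
// IS_VGPR bit of the 'enum10' encoding, so the 9-bit AV load/store field is
// always interpreted as a vector register; bit 9 (512), possibly added from
// the tied vdst/data operand above, then flips the selection to an AGPR.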

template <unsigned Opw>
static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
                                 uint64_t /* Addr */,
                                 const MCDisassembler *Decoder) {
  return decodeAVLdSt(Inst, Imm, Opw, Decoder);
}

static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
                                           uint64_t /*Addr*/,
                                           const MCDisassembler *Decoder) {
  assert(Imm < (1 << 9) && "9-bit encoding");
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(64, Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

static DecodeStatus decodeVersionImm(MCInst &Inst, unsigned Imm,
                                     uint64_t /* Addr */,
                                     const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeVersionImm(Imm));
}

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename InsnType>
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t *Table, MCInst &MI,
                                               InsnType Inst, uint64_t Address,
                                               raw_ostream &Comments) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;

  SmallString<64> LocalComments;
  raw_svector_ostream LocalCommentStream(LocalComments);
  CommentStream = &LocalCommentStream;

  DecodeStatus Res =
      decodeInstruction(Table, TmpInst, Inst, Address, this, STI);

  CommentStream = nullptr;

  if (Res != MCDisassembler::Fail) {
    MI = TmpInst;
    Comments << LocalComments;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

template <typename InsnType>
DecodeStatus
AMDGPUDisassembler::tryDecodeInst(const uint8_t *Table1, const uint8_t *Table2,
                                  MCInst &MI, InsnType Inst, uint64_t Address,
                                  raw_ostream &Comments) const {
  for (const uint8_t *T : {Table1, Table2}) {
    if (DecodeStatus Res = tryDecodeInst(T, MI, Inst, Address, Comments))
      return Res;
  }
  return MCDisassembler::Fail;
}

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}
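
// Example (illustrative): with Bytes = {0x01, 0x02, 0x03, 0x04, ...},
// eatBytes<uint32_t>(Bytes) returns 0x04030201 (little-endian) and advances
// Bytes past the four consumed bytes.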

static DecoderUInt128 eat12Bytes(ArrayRef<uint8_t> &Bytes) {
  assert(Bytes.size() >= 12);
  uint64_t Lo =
      support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(8);
  uint64_t Hi =
      support::endian::read<uint32_t, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(4);
  return DecoderUInt128(Lo, Hi);
}

static DecoderUInt128 eat16Bytes(ArrayRef<uint8_t> &Bytes) {
  assert(Bytes.size() >= 16);
  uint64_t Lo =
      support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(8);
  uint64_t Hi =
      support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(8);
  return DecoderUInt128(Lo, Hi);
}

void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
                                           const MCInstrInfo &MCII) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  for (auto [OpNo, OpDesc] : enumerate(Desc.operands())) {
    if (OpNo >= MI.getNumOperands())
      continue;

    // TODO: Fix V_DUAL_FMAMK_F32_X_FMAAK_F32_gfx12 vsrc operands,
    // defined to take VGPR_32, but in reality allowing inline constants.
    bool IsSrc = AMDGPU::OPERAND_SRC_FIRST <= OpDesc.OperandType &&
                 OpDesc.OperandType <= AMDGPU::OPERAND_SRC_LAST;
    if (!IsSrc && OpDesc.OperandType != MCOI::OPERAND_REGISTER)
      continue;

    MCOperand &Op = MI.getOperand(OpNo);
    if (!Op.isImm())
      continue;
    int64_t Imm = Op.getImm();
    if (AMDGPU::EncValues::INLINE_INTEGER_C_MIN <= Imm &&
        Imm <= AMDGPU::EncValues::INLINE_INTEGER_C_MAX) {
      Op = decodeIntImmed(Imm);
      continue;
    }

    if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
      Op = decodeLiteralConstant(OpDesc.OperandType ==
                                 AMDGPU::OPERAND_REG_IMM_FP64);
      continue;
    }

    if (AMDGPU::EncValues::INLINE_FLOATING_C_MIN <= Imm &&
        Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX) {
      switch (OpDesc.OperandType) {
      case AMDGPU::OPERAND_REG_IMM_BF16:
      case AMDGPU::OPERAND_REG_IMM_V2BF16:
      case AMDGPU::OPERAND_REG_INLINE_C_BF16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
        Imm = getInlineImmValBF16(Imm);
        break;
      case AMDGPU::OPERAND_REG_IMM_FP16:
      case AMDGPU::OPERAND_REG_IMM_INT16:
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
        Imm = getInlineImmValF16(Imm);
        break;
      case AMDGPU::OPERAND_REG_IMM_FP64:
      case AMDGPU::OPERAND_REG_IMM_INT64:
      case AMDGPU::OPERAND_REG_INLINE_C_FP64:
      case AMDGPU::OPERAND_REG_INLINE_C_INT64:
        Imm = getInlineImmVal64(Imm);
        break;
      default:
        Imm = getInlineImmVal32(Imm);
      }
      Op.setImm(Imm);
    }
  }
}
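
// Recap of the source-operand encoding ranges handled above (summary, not in
// the original source): 128..208 are inline integers (0..64, then -1..-16),
// 240..248 are inline floats (0.5 up to 1/(2*pi)) whose bit patterns depend
// on the operand's width/type, and 255 marks a 32-bit literal that follows
// the instruction word.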

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &CS) const {
  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  // In case the opcode is not recognized we'll assume a Size of 4 bytes (unless
  // there are fewer bytes left). This will be overridden on success.
  Size = std::min((size_t)4, Bytes_.size());

  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (isGFX1250() && Bytes.size() >= 16) {
      DecoderUInt128 DecW = eat16Bytes(Bytes);
      if (tryDecodeInst(DecoderTableGFX1250128, MI, DecW, Address, CS))
        break;
      Bytes = Bytes_.slice(0, MaxInstBytesNum);
    }
    if (isGFX11Plus() && Bytes.size() >= 12) {
      DecoderUInt128 DecW = eat12Bytes(Bytes);

      if (isGFX11() &&
          tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI,
                        DecW, Address, CS))
        break;

      if (isGFX1250() &&
          tryDecodeInst(DecoderTableGFX125096, DecoderTableGFX1250_FAKE1696, MI,
                        DecW, Address, CS))
        break;

      if (isGFX12() &&
          tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI,
                        DecW, Address, CS))
        break;

      if (isGFX12() &&
          tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
        // Return 8 bytes for a potential literal.
        Bytes = Bytes_.slice(4, MaxInstBytesNum - 4);

        if (isGFX1250() &&
            tryDecodeInst(DecoderTableGFX125096, MI, DecW, Address, CS))
          break;
      }

      // Reinitialize Bytes
      Bytes = Bytes_.slice(0, MaxInstBytesNum);

    } else if (Bytes.size() >= 16 &&
               STI.hasFeature(AMDGPU::FeatureGFX950Insts)) {
      DecoderUInt128 DecW = eat16Bytes(Bytes);
      if (tryDecodeInst(DecoderTableGFX940128, MI, DecW, Address, CS))
        break;

      // Reinitialize Bytes
      Bytes = Bytes_.slice(0, MaxInstBytesNum);
    }
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
          tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
          tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
          tryDecodeInst(DecoderTableGFX95064, MI, QW, Address, CS))
        break;

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
          tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
          tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
          tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS))
        break;

      if ((isVI() || isGFX9()) &&
          tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS))
        break;

      if (isGFX9() && tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS))
        break;

      if (isGFX10() && tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS))
        break;

      if (isGFX1250() &&
          tryDecodeInst(DecoderTableGFX125064, DecoderTableGFX1250_FAKE1664, MI,
                        QW, Address, CS))
        break;

      if (isGFX12() &&
          tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
                        Address, CS))
        break;

      if (isGFX11() &&
          tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
                        Address, CS))
        break;

      if (isGFX11() &&
          tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS))
        break;

      if (isGFX12() &&
          tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS))
        break;

      // Reinitialize Bytes
      Bytes = Bytes_.slice(0, MaxInstBytesNum);
    }
    // Try decode 32-bit instruction
    if (Bytes.size() >= 4) {
      const uint32_t DW = eatBytes<uint32_t>(Bytes);

      if ((isVI() || isGFX9()) &&
          tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS))
        break;

      if (tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS))
        break;

      if (isGFX9() && tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
          tryDecodeInst(DecoderTableGFX95032, MI, DW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
          tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS))
        break;

      if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
          tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS))
        break;

      if (isGFX10() && tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS))
        break;

      if (isGFX11() &&
          tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
                        Address, CS))
        break;

      if (isGFX1250() &&
          tryDecodeInst(DecoderTableGFX125032, DecoderTableGFX1250_FAKE1632, MI,
                        DW, Address, CS))
        break;

      if (isGFX12() &&
          tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
                        Address, CS))
        break;
    }

    return MCDisassembler::Fail;
  } while (false);

  DecodeStatus Status = MCDisassembler::Success;

  decodeImmOperands(MI, *MCII);

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
    if (isMacDPP(MI))
      convertMacDPPInst(MI);

    if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P)
      convertVOP3PDPPInst(MI);
    else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
      convertVOPCDPPInst(MI); // Special VOP3 case
    else if (AMDGPU::isVOPC64DPP(MI.getOpcode()))
      convertVOPC64DPPInst(MI); // Special VOP3 case
    else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=
             -1)
      convertDPP8Inst(MI);
    else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3)
      convertVOP3DPPInst(MI); // Regular VOP3 case
  }

  convertTrue16OpSel(MI);

  if (AMDGPU::isMAC(MI.getOpcode())) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
      MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
      !AMDGPU::hasGDS(STI)) {
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::gds);
  }

  if (MCII->get(MI.getOpcode()).TSFlags &
      (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD)) {
    int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::cpol);
    if (CPolPos != -1) {
      unsigned CPol =
          (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
              AMDGPU::CPol::GLC : 0;
      if (MI.getNumOperands() <= (unsigned)CPolPos) {
        insertNamedMCOperand(MI, MCOperand::createImm(CPol),
                             AMDGPU::OpName::cpol);
      } else if (CPol) {
        MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
      }
    }
  }

  if ((MCII->get(MI.getOpcode()).TSFlags &
       (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
      (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
    // GFX90A lost TFE, its place is occupied by ACC.
    int TFEOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
    if (TFEOpIdx != -1) {
      auto *TFEIter = MI.begin();
      std::advance(TFEIter, TFEOpIdx);
      MI.insert(TFEIter, MCOperand::createImm(0));
    }
  }

  if (MCII->get(MI.getOpcode()).TSFlags &
      (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) {
    int SWZOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
    if (SWZOpIdx != -1) {
      auto *SWZIter = MI.begin();
      std::advance(SWZIter, SWZOpIdx);
      MI.insert(SWZIter, MCOperand::createImm(0));
    }
  }

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords)
        return MCDisassembler::Fail;
      for (unsigned i = 0; i < NSAArgs; ++i) {
        const unsigned VAddrIdx = VAddr0Idx + 1 + i;
        auto VAddrRCID =
            MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
        MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
      }
      Bytes = Bytes.slice(4 * NSAWords);
    }

    convertMIMGInst(MI);
  }

  if (MCII->get(MI.getOpcode()).TSFlags &
      (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE))
    convertMIMGInst(MI);

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP)
    convertEXPInst(MI);

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP)
    convertVINTERPInst(MI);

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA)
    convertSDWAInst(MI);

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsMAI)
    convertMAIInst(MI);

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsWMMA)
    convertWMMAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                                                              MCOI::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
                       !MI.getOperand(VDstIn_Idx).isReg() ||
                       MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
                           MCOperand::createReg(MI.getOperand(Tied).getReg()),
                           AMDGPU::OpName::vdst_in);
    }
  }

  bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm) && !IsSOPK)
    convertFMAanyK(MI);

  // Some VOPC instructions, e.g., v_cmpx_f_f64, use VOP3 encoding and
  // have EXEC as implicit destination. Issue a warning if encoding for
  // vdst is not EXEC.
  if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3) &&
      MCII->get(MI.getOpcode()).hasImplicitDefOfPhysReg(AMDGPU::EXEC)) {
    auto ExecEncoding = MRI.getEncodingValue(AMDGPU::EXEC_LO);
    if (Bytes_[0] != ExecEncoding)
      Status = MCDisassembler::SoftFail;
  }

  Size = MaxInstBytesNum - Bytes.size();
  return Status;
}

void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
  if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
    // The MCInst still has these fields even though they are no longer encoded
    // in the GFX11 instruction.
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
  }
}

void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
  if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12) {
    // The MCInst has this field that is not directly encoded in the
    // instruction.
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
  }
}
void AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
      STI.hasFeature(AMDGPU::FeatureGFX10)) {
    if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
}

/// Adjust the register values used by V_MFMA_F8F6F4_f8_f8 instructions to the
/// appropriate subregister for the used format width.
static void adjustMFMA_F8F6F4OpRegClass(const MCRegisterInfo &MRI,
                                        MCOperand &MO, uint8_t NumRegs) {
  switch (NumRegs) {
  case 4:
    return MO.setReg(MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3));
  case 6:
    return MO.setReg(
        MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5));
  case 8:
    if (MCRegister NewReg = MRI.getSubReg(
            MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7)) {
      MO.setReg(NewReg);
    }
    return;
  case 12: {
    // There is no 384-bit subreg index defined.
    MCRegister BaseReg = MRI.getSubReg(MO.getReg(), AMDGPU::sub0);
    MCRegister NewReg = MRI.getMatchingSuperReg(
        BaseReg, AMDGPU::sub0, &MRI.getRegClass(AMDGPU::VReg_384RegClassID));
    return MO.setReg(NewReg);
  }
  case 16:
    // No-op in cases where one operand is still f8/bf8.
    return;
  default:
    llvm_unreachable("Unexpected size for mfma/wmma f8f6f4 operand");
  }
}

/// f8f6f4 instructions have different pseudos depending on the used formats.
/// In the disassembler table, we only have the variants with the largest
/// register classes, which assume an fp8/bf8 format for both operands. The
/// actual register class depends on the format in the blgp and cbsz operands.
/// Adjust the register classes depending on the used format.
void AMDGPUDisassembler::convertMAIInst(MCInst &MI) const {
  int BlgpIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::blgp);
  if (BlgpIdx == -1)
    return;

  int CbszIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::cbsz);

  unsigned CBSZ = MI.getOperand(CbszIdx).getImm();
  unsigned BLGP = MI.getOperand(BlgpIdx).getImm();

  const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
      AMDGPU::getMFMA_F8F6F4_WithFormatArgs(CBSZ, BLGP, MI.getOpcode());
  if (!AdjustedRegClassOpcode ||
      AdjustedRegClassOpcode->Opcode == MI.getOpcode())
    return;

  MI.setOpcode(AdjustedRegClassOpcode->Opcode);
  int Src0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  int Src1Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
                              AdjustedRegClassOpcode->NumRegsSrcA);
  adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
                              AdjustedRegClassOpcode->NumRegsSrcB);
}

void AMDGPUDisassembler::convertWMMAInst(MCInst &MI) const {
  int FmtAIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::matrix_a_fmt);
  if (FmtAIdx == -1)
    return;

  int FmtBIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::matrix_b_fmt);

  unsigned FmtA = MI.getOperand(FmtAIdx).getImm();
  unsigned FmtB = MI.getOperand(FmtBIdx).getImm();

  const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
      AMDGPU::getWMMA_F8F6F4_WithFormatArgs(FmtA, FmtB, MI.getOpcode());
  if (!AdjustedRegClassOpcode ||
      AdjustedRegClassOpcode->Opcode == MI.getOpcode())
    return;

  MI.setOpcode(AdjustedRegClassOpcode->Opcode);
  int Src0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  int Src1Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
                              AdjustedRegClassOpcode->NumRegsSrcA);
  adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
                              AdjustedRegClassOpcode->NumRegsSrcB);
}
1070
1072 unsigned OpSel = 0;
1073 unsigned OpSelHi = 0;
1074 unsigned NegLo = 0;
1075 unsigned NegHi = 0;
1076};
1077

// Reconstruct values of VOP3/VOP3P operands such as op_sel.
// Note that these values do not affect disassembler output,
// so this is only necessary for consistency with src_modifiers.
static VOPModifiers collectVOPModifiers(const MCInst &MI,
                                        bool IsVOP3P = false) {
  VOPModifiers Modifiers;
  unsigned Opc = MI.getOpcode();
  const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers,
                                   AMDGPU::OpName::src1_modifiers,
                                   AMDGPU::OpName::src2_modifiers};
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
    if (OpIdx == -1)
      continue;

    unsigned Val = MI.getOperand(OpIdx).getImm();

    Modifiers.OpSel |= !!(Val & SISrcMods::OP_SEL_0) << J;
    if (IsVOP3P) {
      Modifiers.OpSelHi |= !!(Val & SISrcMods::OP_SEL_1) << J;
      Modifiers.NegLo |= !!(Val & SISrcMods::NEG) << J;
      Modifiers.NegHi |= !!(Val & SISrcMods::NEG_HI) << J;
    } else if (J == 0) {
      Modifiers.OpSel |= !!(Val & SISrcMods::DST_OP_SEL) << 3;
    }
  }

  return Modifiers;
}
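// Worked example (illustrative): for a VOP3P instruction with OP_SEL_0 set
// only in src1_modifiers, the loop above yields Mods.OpSel = 0b010; for plain
// VOP3, a DST_OP_SEL bit found in src0_modifiers instead appears as bit 3 of
// Mods.OpSel.
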
// Instructions decode the op_sel suffix bits into the src_modifiers
// operands. Copy those bits into the src operands for true16 VGPRs.
void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MCRegisterClass &ConversionRC =
      MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
  constexpr std::array<std::tuple<AMDGPU::OpName, AMDGPU::OpName, unsigned>, 4>
      OpAndOpMods = {{{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
                       SISrcMods::OP_SEL_0},
                      {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
                       SISrcMods::OP_SEL_0},
                      {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
                       SISrcMods::OP_SEL_0},
                      {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
                       SISrcMods::DST_OP_SEL}}};
  for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName);
    int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
    if (OpIdx == -1 || OpModsIdx == -1)
      continue;
    MCOperand &Op = MI.getOperand(OpIdx);
    if (!Op.isReg())
      continue;
    if (!ConversionRC.contains(Op.getReg()))
      continue;
    unsigned OpEnc = MRI.getEncodingValue(Op.getReg());
    const MCOperand &OpMods = MI.getOperand(OpModsIdx);
    unsigned ModVal = OpMods.getImm();
    if (ModVal & OpSelMask) { // isHi
      unsigned RegIdx = OpEnc & AMDGPU::HWEncoding::REG_IDX_MASK;
      Op.setReg(ConversionRC.getRegister(RegIdx * 2 + 1));
    }
  }
}

// MAC opcodes have special old and src2 operands.
// src2 is tied to dst, while old is not tied (but assumed to be).
bool AMDGPUDisassembler::isMacDPP(MCInst &MI) const {
  constexpr int DST_IDX = 0;
  auto Opcode = MI.getOpcode();
  const auto &Desc = MCII->get(Opcode);
  auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);

  if (OldIdx != -1 && Desc.getOperandConstraint(
                          OldIdx, MCOI::OperandConstraint::TIED_TO) == -1) {
    assert(AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2));
    assert(Desc.getOperandConstraint(
               AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),
               MCOI::OperandConstraint::TIED_TO) == DST_IDX);
    (void)DST_IDX;
    return true;
  }

  return false;
}

// Create dummy old operand and insert dummy unused src2_modifiers
void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
  assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
  insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
  insertNamedMCOperand(MI, MCOperand::createImm(0),
                       AMDGPU::OpName::src2_modifiers);
}

void AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();

  int VDstInIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
  if (VDstInIdx != -1)
    insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);

  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
    convertTrue16OpSel(MI);
    auto Mods = collectVOPModifiers(MI);
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                         AMDGPU::OpName::op_sel);
  } else {
    // Insert dummy unused src modifiers.
    if (MI.getNumOperands() < DescNumOps &&
        AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
      insertNamedMCOperand(MI, MCOperand::createImm(0),
                           AMDGPU::OpName::src0_modifiers);

    if (MI.getNumOperands() < DescNumOps &&
        AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
      insertNamedMCOperand(MI, MCOperand::createImm(0),
                           AMDGPU::OpName::src1_modifiers);
  }
}

void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
  convertTrue16OpSel(MI);

  int VDstInIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
  if (VDstInIdx != -1)
    insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);

  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
    auto Mods = collectVOPModifiers(MI);
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                         AMDGPU::OpName::op_sel);
  }
}

// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show the address as
// if it had 1 dword, which may not actually be the case.
void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
  auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  AMDGPU::OpName RsrcOpName = (TSFlags & SIInstrFlags::MIMG)
                                  ? AMDGPU::OpName::srsrc
                                  : AMDGPU::OpName::rsrc;
  int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);
  int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::d16);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);

  assert(VDataIdx != -1);
  if (BaseOpcode->BVH) {
    // Add A16 operand for intersect_ray instructions
    addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
    return;
  }
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = TSFlags & SIInstrFlags::Gather4;
  bool IsVSample = TSFlags & SIInstrFlags::VSAMPLE;
  bool IsNSA = false;
  bool IsPartialNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (isGFX10Plus()) {
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    int A16Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
    const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());

    AddrSize =
        AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));

    // VSAMPLE insts that do not use vaddr3 behave the same as NSA forms.
    // VIMAGE insts other than BVH never use vaddr4.
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
            Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
            Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
    if (!IsNSA) {
      if (!IsVSample && AddrSize > 12)
        AddrSize = 16;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
          // The NSA encoding does not contain enough operands for the
          // combination of base opcode / dimension. Should this be an error?
          return;
        }
        IsPartialNSA = true;
      }
    }
  }
  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
    DstSize += 1;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return;
  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return;

  // Widen the register to the correct number of enabled channels.
  MCRegister NewVdata;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;

    // Get first subregister of VData
    MCRegister Vdata0 = MI.getOperand(VDataIdx).getReg();
    MCRegister VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (!NewVdata) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return;
    }
  }

  // If not using NSA on GFX10+, widen vaddr0 address register to correct size.
  // If using partial NSA on GFX11+ widen last address register.
  int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;
  MCRegister NewVAddrSA;
  if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
      AddrSize != Info->VAddrDwords) {
    MCRegister VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
    MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
    VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;

    auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
    NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
                                         &MRI.getRegClass(AddrRCID));
    if (!NewVAddrSA)
      return;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddrSA) {
    MI.getOperand(VAddrSAIdx) = MCOperand::createReg(NewVAddrSA);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }
}

// The op_sel and neg bits are used both in src_modifiers and in standalone
// operands. The autogenerated decoder only adds them to src_modifiers, so
// manually add the bits to the other operands.
void AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  auto Mods = collectVOPModifiers(MI, true);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vdst_in);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                         AMDGPU::OpName::op_sel);
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel_hi))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSelHi),
                         AMDGPU::OpName::op_sel_hi);
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_lo))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegLo),
                         AMDGPU::OpName::neg_lo);
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
                         AMDGPU::OpName::neg_hi);
}

// Create dummy old operand and insert optional operands
void AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::old))
    insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);
}

void AMDGPUDisassembler::convertVOPC64DPPInst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  convertTrue16OpSel(MI);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
    auto Mods = collectVOPModifiers(MI);
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                         AMDGPU::OpName::op_sel);
  }
}

void AMDGPUDisassembler::convertFMAanyK(MCInst &MI) const {
  assert(HasLiteral && "Should have decoded a literal");
  insertNamedMCOperand(MI, MCOperand::createImm(Literal), AMDGPU::OpName::immX);
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can; let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::TTMP_96RegClassID:
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::TTMP_288RegClassID:
  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::TTMP_320RegClassID:
  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::TTMP_352RegClassID:
  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::TTMP_384RegClassID:
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::createVGPR16Operand(unsigned RegIdx,
                                                  bool IsHi) const {
  unsigned RegIdxInVGPR16 = RegIdx * 2 + (IsHi ? 1 : 0);
  return createRegOperand(AMDGPU::VGPR_16RegClassID, RegIdxInVGPR16);
}
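
// The VGPR_16 class enumerates lo/hi halves pairwise (illustrative): RegIdx 0
// maps to indices 0 (v0.l) and 1 (v0.h), RegIdx 1 to 2/3, and so on, which is
// why the index above is RegIdx * 2 plus the IsHi bit.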

// Decode Literals for insts which always have a literal in the encoding
MCOperand
AMDGPUDisassembler::decodeMandatoryLiteralConstant(unsigned Val) const {
  if (HasLiteral) {
    assert(
        AMDGPU::hasVOPD(STI) &&
        "Should only decode multiple kimm with VOPD, check VSrc operand types");
    if (Literal != Val)
      return errOperand(Val, "More than one unique literal is illegal");
  }
  HasLiteral = true;
  Literal = Val;
  return MCOperand::createImm(Literal);
}

MCOperand
AMDGPUDisassembler::decodeMandatoryLiteral64Constant(uint64_t Val) const {
  if (HasLiteral) {
    if (Literal64 != Val)
      return errOperand(Val, "More than one unique literal is illegal");
  }
  HasLiteral = true;
  Literal = Literal64 = Val;
  return MCOperand::createImm(Literal64);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
  // For now, all literal constants are supposed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                           Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = Literal64 = eatBytes<uint32_t>(Bytes);
    if (ExtendFP64)
      Literal64 <<= 32;
  }
  return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
}

MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const {
  assert(STI.hasFeature(AMDGPU::Feature64BitLiterals));

  if (!HasLiteral) {
    if (Bytes.size() < 8) {
      return errOperand(0, "cannot read literal64, inst bytes left " +
                           Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal64 = eatBytes<uint64_t>(Bytes);
  }
  return MCOperand::createImm(Literal64);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
  // Cast prevents negative overflow.
}
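
// Worked example (illustrative): Imm = 129 decodes to 1 (129 - 128), Imm =
// 192 to 64, and Imm = 193 to -1 (192 - 193); the subtraction is performed on
// int64_t so the negative branch cannot overflow.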

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return llvm::bit_cast<uint32_t>(0.5f);
  case 241:
    return llvm::bit_cast<uint32_t>(-0.5f);
  case 242:
    return llvm::bit_cast<uint32_t>(1.0f);
  case 243:
    return llvm::bit_cast<uint32_t>(-1.0f);
  case 244:
    return llvm::bit_cast<uint32_t>(2.0f);
  case 245:
    return llvm::bit_cast<uint32_t>(-2.0f);
  case 246:
    return llvm::bit_cast<uint32_t>(4.0f);
  case 247:
    return llvm::bit_cast<uint32_t>(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return llvm::bit_cast<uint64_t>(0.5);
  case 241:
    return llvm::bit_cast<uint64_t>(-0.5);
  case 242:
    return llvm::bit_cast<uint64_t>(1.0);
  case 243:
    return llvm::bit_cast<uint64_t>(-1.0);
  case 244:
    return llvm::bit_cast<uint64_t>(2.0);
  case 245:
    return llvm::bit_cast<uint64_t>(-2.0);
  case 246:
    return llvm::bit_cast<uint64_t>(4.0);
  case 247:
    return llvm::bit_cast<uint64_t>(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmValF16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmValBF16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3F00;
  case 241:
    return 0xBF00;
  case 242:
    return 0x3F80;
  case 243:
    return 0xBF80;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4080;
  case 247:
    return 0xC080;
  case 248: // 1 / (2 * PI)
    return 0x3E22;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(unsigned Width) const {
  using namespace AMDGPU;

  switch (Width) {
  case 16:
  case 32:
    return VGPR_32RegClassID;
  case 64:
    return VReg_64RegClassID;
  case 96:
    return VReg_96RegClassID;
  case 128:
    return VReg_128RegClassID;
  case 160:
    return VReg_160RegClassID;
  case 192:
    return VReg_192RegClassID;
  case 256:
    return VReg_256RegClassID;
  case 288:
    return VReg_288RegClassID;
  case 320:
    return VReg_320RegClassID;
  case 352:
    return VReg_352RegClassID;
  case 384:
    return VReg_384RegClassID;
  case 512:
    return VReg_512RegClassID;
  case 1024:
    return VReg_1024RegClassID;
  }
  llvm_unreachable("Invalid register width!");
}

unsigned AMDGPUDisassembler::getAgprClassId(unsigned Width) const {
  using namespace AMDGPU;

  switch (Width) {
  case 16:
  case 32:
    return AGPR_32RegClassID;
  case 64:
    return AReg_64RegClassID;
  case 96:
    return AReg_96RegClassID;
  case 128:
    return AReg_128RegClassID;
  case 160:
    return AReg_160RegClassID;
  case 256:
    return AReg_256RegClassID;
  case 288:
    return AReg_288RegClassID;
  case 320:
    return AReg_320RegClassID;
  case 352:
    return AReg_352RegClassID;
  case 384:
    return AReg_384RegClassID;
  case 512:
    return AReg_512RegClassID;
  case 1024:
    return AReg_1024RegClassID;
  }
  llvm_unreachable("Invalid register width!");
}

unsigned AMDGPUDisassembler::getSgprClassId(unsigned Width) const {
  using namespace AMDGPU;

  switch (Width) {
  case 16:
  case 32:
    return SGPR_32RegClassID;
  case 64:
    return SGPR_64RegClassID;
  case 96:
    return SGPR_96RegClassID;
  case 128:
    return SGPR_128RegClassID;
  case 160:
    return SGPR_160RegClassID;
  case 256:
    return SGPR_256RegClassID;
  case 288:
    return SGPR_288RegClassID;
  case 320:
    return SGPR_320RegClassID;
  case 352:
    return SGPR_352RegClassID;
  case 384:
    return SGPR_384RegClassID;
  case 512:
    return SGPR_512RegClassID;
  }
  llvm_unreachable("Invalid register width!");
}

unsigned AMDGPUDisassembler::getTtmpClassId(unsigned Width) const {
  using namespace AMDGPU;

  switch (Width) {
  case 16:
  case 32:
    return TTMP_32RegClassID;
  case 64:
    return TTMP_64RegClassID;
  case 128:
    return TTMP_128RegClassID;
  case 256:
    return TTMP_256RegClassID;
  case 288:
    return TTMP_288RegClassID;
  case 320:
    return TTMP_320RegClassID;
  case 352:
    return TTMP_352RegClassID;
  case 384:
    return TTMP_384RegClassID;
  case 512:
    return TTMP_512RegClassID;
  }
  llvm_unreachable("Invalid register width!");
}
1817int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1818 using namespace AMDGPU::EncValues;
1819
1820 unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1821 unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1822
1823 return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1824}
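// Worked example (a sketch; the bounds come from the EncValues tables, where
// the ttmp block is expected to start at 108 on gfx9+ and at 112 on VI): on
// gfx9+, Val = 110 yields 110 - 108 = 2, i.e. ttmp2, while any value outside
// [TTmpMin, TTmpMax] returns -1 so the caller can try the other decodings.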
1825
1826MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const {
1827 using namespace AMDGPU::EncValues;
1828
1829 assert(Val < 1024); // enum10
1830
1831 bool IsAGPR = Val & 512;
1832 Val &= 511;
1833
1834 if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1835 return createRegOperand(IsAGPR ? getAgprClassId(Width)
1836 : getVgprClassId(Width), Val - VGPR_MIN);
1837 }
1838 return decodeNonVGPRSrcOp(Width, Val & 0xFF);
1839}
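// Worked example (illustrative): bit 9 of the 10-bit source encoding selects
// the accumulator file. For a 32-bit operand, Val = 517 (512 + 5) becomes a5
// and Val = 5 becomes v5; an encoding outside the VGPR window falls through
// to decodeNonVGPRSrcOp with only the low 8 bits kept.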
1840
1841 MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width,
1842 unsigned Val) const {
1843 // Cases where Val{8} is 1 (a VGPR, an AGPR, or a true16 VGPR) should have
1844 // been decoded earlier.
1845 assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
1846 using namespace AMDGPU::EncValues;
1847
1848 if (Val <= SGPR_MAX) {
1849 // "SGPR_MIN <= Val" is always true and would cause a compilation warning.
1850 static_assert(SGPR_MIN == 0);
1851 return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1852 }
1853
1854 int TTmpIdx = getTTmpIdx(Val);
1855 if (TTmpIdx >= 0) {
1856 return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1857 }
1858
1859 if ((INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX) ||
1860 (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX) ||
1861 Val == LITERAL_CONST)
1862 return MCOperand::createImm(Val);
1863
1864 if (Val == LITERAL64_CONST && STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
1865 return decodeLiteral64Constant();
1866 }
1867
1868 switch (Width) {
1869 case 32:
1870 case 16:
1871 return decodeSpecialReg32(Val);
1872 case 64:
1873 return decodeSpecialReg64(Val);
1874 case 96:
1875 case 128:
1876 case 256:
1877 case 512:
1878 return decodeSpecialReg96Plus(Val);
1879 default:
1880 llvm_unreachable("unexpected immediate type");
1881 }
1882}
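// Decode-order sketch for the scalar side: an 8-bit value is matched as an
// SGPR first, then as a trap temporary, then as an inline constant or a
// literal marker (kept as a plain immediate for later literal handling), and
// finally as a width-dependent special register such as EXEC or VCC.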
1883
1884// Bit 0 of DstY isn't stored in the instruction, because it's always the
1885// opposite of bit 0 of DstX.
1886 MCOperand AMDGPUDisassembler::decodeVOPDDstYOp(MCInst &Inst,
1887 unsigned Val) const {
1888 int VDstXInd =
1889 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
1890 assert(VDstXInd != -1);
1891 assert(Inst.getOperand(VDstXInd).isReg());
1892 unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
1893 Val |= ~XDstReg & 1;
1894 return createRegOperand(getVgprClassId(32), Val);
1895}
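// Worked example (a sketch): with vdstX decoded to v4 (encoding 4, bit 0
// clear), ~XDstReg & 1 evaluates to 1 and forces bit 0 of the DstY index,
// so a stored value of 6 reconstructs v7; with vdstX at v5 the bit stays
// clear. The two VOPD destinations therefore always differ in bit 0.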
1896
1897 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1898 using namespace AMDGPU;
1899
1900 switch (Val) {
1901 // clang-format off
1902 case 102: return createRegOperand(FLAT_SCR_LO);
1903 case 103: return createRegOperand(FLAT_SCR_HI);
1904 case 104: return createRegOperand(XNACK_MASK_LO);
1905 case 105: return createRegOperand(XNACK_MASK_HI);
1906 case 106: return createRegOperand(VCC_LO);
1907 case 107: return createRegOperand(VCC_HI);
1908 case 108: return createRegOperand(TBA_LO);
1909 case 109: return createRegOperand(TBA_HI);
1910 case 110: return createRegOperand(TMA_LO);
1911 case 111: return createRegOperand(TMA_HI);
1912 case 124:
1913 return isGFX11Plus() ? createRegOperand(SGPR_NULL) : createRegOperand(M0);
1914 case 125:
1915 return isGFX11Plus() ? createRegOperand(M0) : createRegOperand(SGPR_NULL);
1916 case 126: return createRegOperand(EXEC_LO);
1917 case 127: return createRegOperand(EXEC_HI);
1918 case 230: return createRegOperand(SRC_FLAT_SCRATCH_BASE_LO);
1919 case 231: return createRegOperand(SRC_FLAT_SCRATCH_BASE_HI);
1920 case 235: return createRegOperand(SRC_SHARED_BASE_LO);
1921 case 236: return createRegOperand(SRC_SHARED_LIMIT_LO);
1922 case 237: return createRegOperand(SRC_PRIVATE_BASE_LO);
1923 case 238: return createRegOperand(SRC_PRIVATE_LIMIT_LO);
1924 case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1925 case 251: return createRegOperand(SRC_VCCZ);
1926 case 252: return createRegOperand(SRC_EXECZ);
1927 case 253: return createRegOperand(SRC_SCC);
1928 case 254: return createRegOperand(LDS_DIRECT);
1929 default: break;
1930 // clang-format on
1931 }
1932 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1933}
1934
1935 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1936 using namespace AMDGPU;
1937
1938 switch (Val) {
1939 case 102: return createRegOperand(FLAT_SCR);
1940 case 104: return createRegOperand(XNACK_MASK);
1941 case 106: return createRegOperand(VCC);
1942 case 108: return createRegOperand(TBA);
1943 case 110: return createRegOperand(TMA);
1944 case 124:
1945 if (isGFX11Plus())
1946 return createRegOperand(SGPR_NULL);
1947 break;
1948 case 125:
1949 if (!isGFX11Plus())
1950 return createRegOperand(SGPR_NULL);
1951 break;
1952 case 126: return createRegOperand(EXEC);
1953 case 230: return createRegOperand(SRC_FLAT_SCRATCH_BASE_LO);
1954 case 235: return createRegOperand(SRC_SHARED_BASE);
1955 case 236: return createRegOperand(SRC_SHARED_LIMIT);
1956 case 237: return createRegOperand(SRC_PRIVATE_BASE);
1957 case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1958 case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1959 case 251: return createRegOperand(SRC_VCCZ);
1960 case 252: return createRegOperand(SRC_EXECZ);
1961 case 253: return createRegOperand(SRC_SCC);
1962 default: break;
1963 }
1964 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1965}
1966
1967 MCOperand AMDGPUDisassembler::decodeSpecialReg96Plus(unsigned Val) const {
1968 using namespace AMDGPU;
1969
1970 switch (Val) {
1971 case 124:
1972 if (isGFX11Plus())
1973 return createRegOperand(SGPR_NULL);
1974 break;
1975 case 125:
1976 if (!isGFX11Plus())
1977 return createRegOperand(SGPR_NULL);
1978 break;
1979 default:
1980 break;
1981 }
1982 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1983}
1984
1985 MCOperand AMDGPUDisassembler::decodeSDWASrc(unsigned Width,
1986 const unsigned Val) const {
1987 using namespace AMDGPU::SDWA;
1988 using namespace AMDGPU::EncValues;
1989
1990 if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
1991 STI.hasFeature(AMDGPU::FeatureGFX10)) {
1992 // XXX: The cast to int is needed to avoid a compiler warning: because
1993 // SRC_VGPR_MIN is zero, the unsigned comparison would otherwise always be true.
1994 if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1995 Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1996 return createRegOperand(getVgprClassId(Width),
1997 Val - SDWA9EncValues::SRC_VGPR_MIN);
1998 }
1999 if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
2000 Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
2001 : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
2002 return createSRegOperand(getSgprClassId(Width),
2003 Val - SDWA9EncValues::SRC_SGPR_MIN);
2004 }
2005 if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
2006 Val <= SDWA9EncValues::SRC_TTMP_MAX) {
2007 return createSRegOperand(getTtmpClassId(Width),
2008 Val - SDWA9EncValues::SRC_TTMP_MIN);
2009 }
2010
2011 const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
2012
2013 if ((INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX) ||
2014 (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX))
2015 return MCOperand::createImm(SVal);
2016
2017 return decodeSpecialReg32(SVal);
2018 }
2019 if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
2020 return createRegOperand(getVgprClassId(Width), Val);
2021 llvm_unreachable("unsupported target");
2022}
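// Decode sketch for the GFX9/GFX10 SDWA source encoding: values in the VGPR
// window map directly to VGPRs, values above SRC_SGPR_MIN map to SGPRs or
// trap temporaries, and anything left is rebased by SRC_SGPR_MIN and then
// reinterpreted as an inline constant or a 32-bit special register,
// mirroring the plain 9-bit source decoding above.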
2023
2024 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
2025 return decodeSDWASrc(16, Val);
2026}
2027
2028 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
2029 return decodeSDWASrc(32, Val);
2030}
2031
2032 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
2033 using namespace AMDGPU::SDWA;
2034
2035 assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
2036 STI.hasFeature(AMDGPU::FeatureGFX10)) &&
2037 "SDWAVopcDst should be present only on GFX9+");
2038
2039 bool IsWave32 = STI.hasFeature(AMDGPU::FeatureWavefrontSize32);
2040
2041 if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
2042 Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
2043
2044 int TTmpIdx = getTTmpIdx(Val);
2045 if (TTmpIdx >= 0) {
2046 auto TTmpClsId = getTtmpClassId(IsWave32 ? 32 : 64);
2047 return createSRegOperand(TTmpClsId, TTmpIdx);
2048 }
2049 if (Val > SGPR_MAX) {
2050 return IsWave32 ? decodeSpecialReg32(Val) : decodeSpecialReg64(Val);
2051 }
2052 return createSRegOperand(getSgprClassId(IsWave32 ? 32 : 64), Val);
2053 }
2054 return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
2055}
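// Worked example (illustrative): with VOPC_DST_VCC_MASK clear the result is
// the implicit VCC (VCC_LO in wave32). With the mask set, the masked value
// is decoded as a scalar destination, e.g. a masked value of 4 under wave64
// becomes the pair s[4:5], since 64-bit scalar operands are addressed in
// even-aligned units.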
2056
2057 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
2058 return STI.hasFeature(AMDGPU::FeatureWavefrontSize32) ? decodeSrcOp(32, Val)
2059 : decodeSrcOp(64, Val);
2060}
2061
2062 MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
2063 return decodeSrcOp(32, Val);
2064}
2065
2066 MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
2067 if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1)
2068 return MCOperand();
2069 return MCOperand::createImm(Val);
2070}
2071
2072 MCOperand AMDGPUDisassembler::decodeVersionImm(unsigned Imm) const {
2073 using VersionField = AMDGPU::EncodingField<7, 0>;
2074 using W64Bit = AMDGPU::EncodingBit<13>;
2075 using W32Bit = AMDGPU::EncodingBit<14>;
2076 using MDPBit = AMDGPU::EncodingBit<15>;
2077 using Encoding = AMDGPU::EncodingFields<VersionField, W64Bit, W32Bit, MDPBit>;
2078
2079 auto [Version, W64, W32, MDP] = Encoding::decode(Imm);
2080
2081 // Decode into a plain immediate if any unused bits are set.
2082 if (Encoding::encode(Version, W64, W32, MDP) != Imm)
2083 return MCOperand::createImm(Imm);
2084
2085 const auto &Versions = AMDGPU::UCVersion::getGFXVersions();
2086 const auto *I = find_if(
2087 Versions, [Version = Version](const AMDGPU::UCVersion::GFXVersion &V) {
2088 return V.Code == Version;
2089 });
2090 MCContext &Ctx = getContext();
2091 const MCExpr *E;
2092 if (I == Versions.end())
2093 E = MCConstantExpr::create(Version, Ctx);
2094 else
2095 E = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(I->Symbol), Ctx);
2096
2097 if (W64)
2098 E = MCBinaryExpr::createOr(E, UCVersionW64Expr, Ctx);
2099 if (W32)
2100 E = MCBinaryExpr::createOr(E, UCVersionW32Expr, Ctx);
2101 if (MDP)
2102 E = MCBinaryExpr::createOr(E, UCVersionMDPExpr, Ctx);
2103
2104 return MCOperand::createExpr(E);
2105}
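// Field layout sketch, as read from the using-declarations above: bits [7:0]
// hold the microcode version code, bit 13 the W64 flag, bit 14 the W32 flag,
// and bit 15 the MDP flag. An immediate with any other bit set fails the
// encode/decode round-trip check and is printed as a raw number instead of a
// symbolic expression.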
2106
2107 bool AMDGPUDisassembler::isVI() const {
2108 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2109}
2110
2111 bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
2112
2113 bool AMDGPUDisassembler::isGFX90A() const {
2114 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2115}
2116
2117 bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
2118
2119 bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
2120
2121 bool AMDGPUDisassembler::isGFX10Plus() const {
2122 return AMDGPU::isGFX10Plus(STI);
2123}
2124
2125 bool AMDGPUDisassembler::isGFX11() const {
2126 return STI.hasFeature(AMDGPU::FeatureGFX11);
2127}
2128
2129 bool AMDGPUDisassembler::isGFX11Plus() const {
2130 return AMDGPU::isGFX11Plus(STI);
2131}
2132
2133 bool AMDGPUDisassembler::isGFX12() const {
2134 return STI.hasFeature(AMDGPU::FeatureGFX12);
2135}
2136
2137 bool AMDGPUDisassembler::isGFX12Plus() const {
2138 return AMDGPU::isGFX12Plus(STI);
2139}
2140
2141 bool AMDGPUDisassembler::isGFX1250() const { return AMDGPU::isGFX1250(STI); }
2142
2143 bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
2144 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2145}
2146
2147 bool AMDGPUDisassembler::hasKernargPreload() const {
2148 return AMDGPU::hasKernargPreload(STI);
2149 }
2150
2151//===----------------------------------------------------------------------===//
2152// AMDGPU specific symbol handling
2153//===----------------------------------------------------------------------===//
2154
2155/// Print a string describing the reserved bit range specified by Mask with
2156/// offset BaseBytes for use in error comments. Mask is a single continuous
2157/// range of 1s surrounded by zeros. The format here is meant to align with the
2158/// tables that describe these bits in llvm.org/docs/AMDGPUUsage.html.
2159static SmallString<32> getBitRangeFromMask(uint32_t Mask, unsigned BaseBytes) {
2160 SmallString<32> Result;
2161 raw_svector_ostream S(Result);
2162
2163 int TrailingZeros = llvm::countr_zero(Mask);
2164 int PopCount = llvm::popcount(Mask);
2165
2166 if (PopCount == 1) {
2167 S << "bit (" << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
2168 } else {
2169 S << "bits in range ("
2170 << (TrailingZeros + PopCount - 1 + BaseBytes * CHAR_BIT) << ':'
2171 << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
2172 }
2173
2174 return Result;
2175}
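// Worked example (illustrative): Mask = 0x6 with BaseBytes = 4 has one
// trailing zero and a popcount of 2, so the function prints
// "bits in range (34:33)"; a single-bit mask such as 0x100 with BaseBytes = 0
// prints "bit (8)".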
2176
2177#define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
2178#define PRINT_DIRECTIVE(DIRECTIVE, MASK) \
2179 do { \
2180 KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n'; \
2181 } while (0)
2182#define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK) \
2183 do { \
2184 KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " " \
2185 << GET_FIELD(MASK) << '\n'; \
2186 } while (0)
2187
2188#define CHECK_RESERVED_BITS_IMPL(MASK, DESC, MSG) \
2189 do { \
2190 if (FourByteBuffer & (MASK)) { \
2191 return createStringError(std::errc::invalid_argument, \
2192 "kernel descriptor " DESC \
2193 " reserved %s set" MSG, \
2194 getBitRangeFromMask((MASK), 0).c_str()); \
2195 } \
2196 } while (0)
2197
2198#define CHECK_RESERVED_BITS(MASK) CHECK_RESERVED_BITS_IMPL(MASK, #MASK, "")
2199#define CHECK_RESERVED_BITS_MSG(MASK, MSG) \
2200 CHECK_RESERVED_BITS_IMPL(MASK, #MASK, ", " MSG)
2201#define CHECK_RESERVED_BITS_DESC(MASK, DESC) \
2202 CHECK_RESERVED_BITS_IMPL(MASK, DESC, "")
2203#define CHECK_RESERVED_BITS_DESC_MSG(MASK, DESC, MSG) \
2204 CHECK_RESERVED_BITS_IMPL(MASK, DESC, ", " MSG)
2205
2206// NOLINTNEXTLINE(readability-identifier-naming)
2207 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
2208 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2209 using namespace amdhsa;
2210 StringRef Indent = "\t";
2211
2212 // We cannot accurately backward compute #VGPRs used from
2213 // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
2214 // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
2215 // simply calculate the inverse of what the assembler does.
2216
2217 uint32_t GranulatedWorkitemVGPRCount =
2218 GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
2219
2220 uint32_t NextFreeVGPR =
2221 (GranulatedWorkitemVGPRCount + 1) *
2222 AMDGPU::IsaInfo::getVGPREncodingGranule(&STI, EnableWavefrontSize32);
2223
2224 KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
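// Worked example (a sketch, assuming a VGPR encoding granule of 4): a stored
// GRANULATED_WORKITEM_VGPR_COUNT of 3 prints ".amdhsa_next_free_vgpr 16",
// and assembling that directive divides 16 back into the same granulated
// count, which is all the round-trip guarantee requires.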
2225
2226 // We cannot backward compute values used to calculate
2227 // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the following
2228 // directives can't be computed:
2229 // .amdhsa_reserve_vcc
2230 // .amdhsa_reserve_flat_scratch
2231 // .amdhsa_reserve_xnack_mask
2232 // They take their respective default values if not specified in the assembly.
2233 //
2234 // GRANULATED_WAVEFRONT_SGPR_COUNT
2235 // = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
2236 //
2237 // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
2238 // are set to 0. So while disassembling we consider that:
2239 //
2240 // GRANULATED_WAVEFRONT_SGPR_COUNT
2241 // = f(NEXT_FREE_SGPR + 0 + 0 + 0)
2242 //
2243 // The disassembler cannot recover the original values of those 3 directives.
2244
2245 uint32_t GranulatedWavefrontSGPRCount =
2246 GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
2247
2248 if (isGFX10Plus())
2249 CHECK_RESERVED_BITS_MSG(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2250 "must be zero on gfx10+");
2251
2252 uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
2253 AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
2254
2255 KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
2256 if (!hasArchitectedFlatScratch())
2257 KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
2258 KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
2259 KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";
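// Worked example (a sketch, assuming an SGPR encoding granule of 8): a
// stored GRANULATED_WAVEFRONT_SGPR_COUNT of 5 prints
// ".amdhsa_next_free_sgpr 48" with the three reserve directives pinned to 0,
// so the reassembled descriptor reproduces the same granulated count even
// though the original reserve settings are unrecoverable.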
2260
2261 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY);
2262
2263 PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
2264 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
2265 PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
2266 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
2267 PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
2268 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
2269 PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
2270 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
2271
2272 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIV);
2273
2274 if (!isGFX12Plus())
2275 PRINT_DIRECTIVE(".amdhsa_dx10_clamp",
2276 COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
2277
2278 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_DEBUG_MODE);
2279
2280 if (!isGFX12Plus())
2281 PRINT_DIRECTIVE(".amdhsa_ieee_mode",
2282 COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
2283
2284 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_BULKY);
2285 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_CDBG_USER);
2286
2287 // Bits [26].
2288 if (isGFX9Plus()) {
2289 PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
2290 } else {
2291 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX8_RESERVED0,
2292 "COMPUTE_PGM_RSRC1", "must be zero pre-gfx9");
2293 }
2294
2295 // Bits [27].
2296 if (isGFX1250()) {
2297 PRINT_PSEUDO_DIRECTIVE_COMMENT("FLAT_SCRATCH_IS_NV",
2298 COMPUTE_PGM_RSRC1_GFX125_FLAT_SCRATCH_IS_NV);
2299 } else {
2300 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_GFX6_GFX120_RESERVED1,
2301 "COMPUTE_PGM_RSRC1");
2302 }
2303
2304 // Bits [28].
2305 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_RESERVED2, "COMPUTE_PGM_RSRC1");
2306
2307 // Bits [29-31].
2308 if (isGFX10Plus()) {
2309 // WGP_MODE is not available on GFX1250.
2310 if (!isGFX1250()) {
2311 PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
2312 COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
2313 }
2314 PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
2315 PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
2316 } else {
2317 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_GFX6_GFX9_RESERVED3,
2318 "COMPUTE_PGM_RSRC1");
2319 }
2320
2321 if (isGFX12Plus())
2322 PRINT_DIRECTIVE(".amdhsa_round_robin_scheduling",
2323 COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
2324
2325 return true;
2326}
2327
2328// NOLINTNEXTLINE(readability-identifier-naming)
2329 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
2330 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2331 using namespace amdhsa;
2332 StringRef Indent = "\t";
2333 if (isGFX12Plus())
2334 PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
2335 COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2336 else
2337 PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
2338 COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2339 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
2340 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
2341 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
2342 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
2343 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
2344 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
2345 PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
2346 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
2347 PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
2348 COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
2349
2350 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH);
2351 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY);
2352 CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE);
2353
2354 PRINT_DIRECTIVE(
2355 ".amdhsa_exception_fp_ieee_invalid_op",
2356 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
2357 PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
2358 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
2359 PRINT_DIRECTIVE(
2360 ".amdhsa_exception_fp_ieee_div_zero",
2361 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
2362 PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
2363 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
2364 PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
2365 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
2366 PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
2367 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
2368 PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
2369 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
2370
2371 CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC2_RESERVED0, "COMPUTE_PGM_RSRC2");
2372
2373 return true;
2374}
2375
2376// NOLINTNEXTLINE(readability-identifier-naming)
2377 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC3(
2378 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2379 using namespace amdhsa;
2380 StringRef Indent = "\t";
2381 if (isGFX90A()) {
2382 KdStream << Indent << ".amdhsa_accum_offset "
2383 << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
2384 << '\n';
2385
2386 PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
2387
2388 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED0,
2389 "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2390 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED1,
2391 "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2392 } else if (isGFX10Plus()) {
2393 // Bits [0-3].
2394 if (!isGFX12Plus()) {
2395 if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
2396 PRINT_DIRECTIVE(".amdhsa_shared_vgpr_count",
2397 COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2398 } else {
2399 PRINT_PSEUDO_DIRECTIVE_COMMENT(
2400 "SHARED_VGPR_COUNT",
2401 COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2402 }
2403 } else {
2404 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX12_PLUS_RESERVED0,
2405 "COMPUTE_PGM_RSRC3",
2406 "must be zero on gfx12+");
2407 }
2408
2409 // Bits [4-11].
2410 if (isGFX11()) {
2411 PRINT_DIRECTIVE(".amdhsa_inst_pref_size",
2412 COMPUTE_PGM_RSRC3_GFX11_INST_PREF_SIZE);
2413 PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_START",
2414 COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_START);
2415 PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_END",
2416 COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_END);
2417 } else if (isGFX12Plus()) {
2418 PRINT_DIRECTIVE(".amdhsa_inst_pref_size",
2419 COMPUTE_PGM_RSRC3_GFX12_PLUS_INST_PREF_SIZE);
2420 } else {
2421 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED1,
2422 "COMPUTE_PGM_RSRC3",
2423 "must be zero on gfx10");
2424 }
2425
2426 // Bits [12].
2427 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED2,
2428 "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2429
2430 // Bits [13].
2431 if (isGFX12Plus()) {
2432 PRINT_PSEUDO_DIRECTIVE_COMMENT("GLG_EN",
2433 COMPUTE_PGM_RSRC3_GFX12_PLUS_GLG_EN);
2434 } else {
2435 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX11_RESERVED3,
2436 "COMPUTE_PGM_RSRC3",
2437 "must be zero on gfx10 or gfx11");
2438 }
2439
2440 // Bits [14-21].
2441 if (isGFX1250()) {
2442 PRINT_DIRECTIVE(".amdhsa_named_barrier_count",
2443 COMPUTE_PGM_RSRC3_GFX125_NAMED_BAR_CNT);
2444 PRINT_PSEUDO_DIRECTIVE_COMMENT(
2445 "ENABLE_DYNAMIC_VGPR", COMPUTE_PGM_RSRC3_GFX125_ENABLE_DYNAMIC_VGPR);
2446 PRINT_PSEUDO_DIRECTIVE_COMMENT("TCP_SPLIT",
2447 COMPUTE_PGM_RSRC3_GFX125_TCP_SPLIT);
2448 PRINT_PSEUDO_DIRECTIVE_COMMENT(
2449 "ENABLE_DIDT_THROTTLE",
2450 COMPUTE_PGM_RSRC3_GFX125_ENABLE_DIDT_THROTTLE);
2451 } else {
2452 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX120_RESERVED4,
2453 "COMPUTE_PGM_RSRC3",
2454 "must be zero on gfx10+");
2455 }
2456
2457 // Bits [22-30].
2458 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED5,
2459 "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2460
2461 // Bits [31].
2462 if (isGFX11Plus()) {
2463 PRINT_PSEUDO_DIRECTIVE_COMMENT("IMAGE_OP",
2464 COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
2465 } else {
2466 CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED6,
2467 "COMPUTE_PGM_RSRC3",
2468 "must be zero on gfx10");
2469 }
2470 } else if (FourByteBuffer) {
2471 return createStringError(
2472 std::errc::invalid_argument,
2473 "kernel descriptor COMPUTE_PGM_RSRC3 must be all zero before gfx9");
2474 }
2475 return true;
2476}
2477#undef PRINT_PSEUDO_DIRECTIVE_COMMENT
2478#undef PRINT_DIRECTIVE
2479#undef GET_FIELD
2480#undef CHECK_RESERVED_BITS_IMPL
2481#undef CHECK_RESERVED_BITS
2482#undef CHECK_RESERVED_BITS_MSG
2483#undef CHECK_RESERVED_BITS_DESC
2484#undef CHECK_RESERVED_BITS_DESC_MSG
2485
2486/// Create an error object to return from onSymbolStart for reserved kernel
2487/// descriptor bits being set.
2488static Error createReservedKDBitsError(uint32_t Mask, unsigned BaseBytes,
2489 const char *Msg = "") {
2490 return createStringError(
2491 std::errc::invalid_argument, "kernel descriptor reserved %s set%s%s",
2492 getBitRangeFromMask(Mask, BaseBytes).c_str(), *Msg ? ", " : "", Msg);
2493}
2494
2495/// Create an error object to return from onSymbolStart for reserved kernel
2496/// descriptor bytes being set.
2497static Error createReservedKDBytesError(unsigned BaseInBytes,
2498 unsigned WidthInBytes) {
2499 // Create an error comment in the same format as the "Kernel Descriptor"
2500 // table here: https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor .
2501 return createStringError(
2502 std::errc::invalid_argument,
2503 "kernel descriptor reserved bits in range (%u:%u) set",
2504 (BaseInBytes + WidthInBytes) * CHAR_BIT - 1, BaseInBytes * CHAR_BIT);
2505}
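// Worked example (illustrative): createReservedKDBytesError(40, 4) formats
// "kernel descriptor reserved bits in range (351:320) set", matching the
// byte-oriented rows of the kernel descriptor table in AMDGPUUsage.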
2506
2507 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptorDirective(
2508 DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
2509 raw_string_ostream &KdStream) const {
2510#define PRINT_DIRECTIVE(DIRECTIVE, MASK) \
2511 do { \
2512 KdStream << Indent << DIRECTIVE " " \
2513 << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n'; \
2514 } while (0)
2515
2516 uint16_t TwoByteBuffer = 0;
2517 uint32_t FourByteBuffer = 0;
2518
2519 StringRef ReservedBytes;
2520 StringRef Indent = "\t";
2521
2522 assert(Bytes.size() == 64);
2523 DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
2524
2525 switch (Cursor.tell()) {
2526 case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
2527 FourByteBuffer = DE.getU32(Cursor);
2528 KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
2529 << '\n';
2530 return true;
2531
2532 case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
2533 FourByteBuffer = DE.getU32(Cursor);
2534 KdStream << Indent << ".amdhsa_private_segment_fixed_size "
2535 << FourByteBuffer << '\n';
2536 return true;
2537
2538 case amdhsa::KERNARG_SIZE_OFFSET:
2539 FourByteBuffer = DE.getU32(Cursor);
2540 KdStream << Indent << ".amdhsa_kernarg_size "
2541 << FourByteBuffer << '\n';
2542 return true;
2543
2544 case amdhsa::RESERVED0_OFFSET:
2545 // 4 reserved bytes, must be 0.
2546 ReservedBytes = DE.getBytes(Cursor, 4);
2547 for (int I = 0; I < 4; ++I) {
2548 if (ReservedBytes[I] != 0)
2549 return createReservedKDBytesError(amdhsa::RESERVED0_OFFSET, 4);
2550 }
2551 return true;
2552
2553 case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
2554 // KERNEL_CODE_ENTRY_BYTE_OFFSET
2555 // So far no directive controls this for Code Object V3, so simply skip it
2556 // for disassembly.
2557 DE.skip(Cursor, 8);
2558 return true;
2559
2560 case amdhsa::RESERVED1_OFFSET:
2561 // 20 reserved bytes, must be 0.
2562 ReservedBytes = DE.getBytes(Cursor, 20);
2563 for (int I = 0; I < 20; ++I) {
2564 if (ReservedBytes[I] != 0)
2565 return createReservedKDBytesError(amdhsa::RESERVED1_OFFSET, 20);
2566 }
2567 return true;
2568
2569 case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
2570 FourByteBuffer = DE.getU32(Cursor);
2571 return decodeCOMPUTE_PGM_RSRC3(FourByteBuffer, KdStream);
2572
2573 case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
2574 FourByteBuffer = DE.getU32(Cursor);
2575 return decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream);
2576
2577 case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
2578 FourByteBuffer = DE.getU32(Cursor);
2579 return decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream);
2580
2581 case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
2582 using namespace amdhsa;
2583 TwoByteBuffer = DE.getU16(Cursor);
2584
2585 if (!hasArchitectedFlatScratch())
2586 PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
2587 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
2588 PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
2589 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
2590 PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
2591 KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
2592 PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
2593 KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
2594 PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
2595 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
2596 if (!hasArchitectedFlatScratch())
2597 PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
2598 KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
2599 PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
2600 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
2601 if (isGFX1250())
2602 PRINT_DIRECTIVE(".amdhsa_uses_cu_stores",
2603 KERNEL_CODE_PROPERTY_USES_CU_STORES);
2604
2605 if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
2606 return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED0,
2607 amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2608
2609 // Reserved for GFX9
2610 if (isGFX9() &&
2611 (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
2612 return createReservedKDBitsError(
2613 KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
2614 amdhsa::KERNEL_CODE_PROPERTIES_OFFSET, "must be zero on gfx9");
2615 }
2616 if (isGFX10Plus()) {
2617 PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
2618 KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2619 }
2620
2621 if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
2622 PRINT_DIRECTIVE(".amdhsa_uses_dynamic_stack",
2623 KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
2624
2625 if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1) {
2626 return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED1,
2627 amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2628 }
2629
2630 return true;
2631
2632 case amdhsa::KERNARG_PRELOAD_OFFSET:
2633 using namespace amdhsa;
2634 TwoByteBuffer = DE.getU16(Cursor);
2635 if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
2636 PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_length",
2637 KERNARG_PRELOAD_SPEC_LENGTH);
2638 }
2639
2640 if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
2641 PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_offset",
2642 KERNARG_PRELOAD_SPEC_OFFSET);
2643 }
2644 return true;
2645
2646 case amdhsa::RESERVED3_OFFSET:
2647 // 4 bytes from here are reserved, must be 0.
2648 ReservedBytes = DE.getBytes(Cursor, 4);
2649 for (int I = 0; I < 4; ++I) {
2650 if (ReservedBytes[I] != 0)
2651 return createReservedKDBytesError(amdhsa::RESERVED3_OFFSET, 4);
2652 }
2653 return true;
2654
2655 default:
2656 llvm_unreachable("Unhandled index. Case statements cover everything.");
2657 return true;
2658 }
2659#undef PRINT_DIRECTIVE
2660}
2661
2662 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptor(
2663 StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
2664
2665 // CP microcode requires the kernel descriptor to be 64-byte aligned.
2666 if (Bytes.size() != 64 || KdAddress % 64 != 0)
2667 return createStringError(std::errc::invalid_argument,
2668 "kernel descriptor must be 64-byte aligned");
2669
2670 // FIXME: We can't actually decode "in order" as is done below, as e.g. GFX10
2671 // requires us to know the setting of .amdhsa_wavefront_size32 in order to
2672 // accurately produce .amdhsa_next_free_vgpr, and they appear in the wrong
2673 // order. Work around this by first looking up .amdhsa_wavefront_size32 here
2674 // when required.
2675 if (isGFX10Plus()) {
2676 uint16_t KernelCodeProperties =
2677 support::endian::read16(&Bytes[amdhsa::KERNEL_CODE_PROPERTIES_OFFSET],
2678 llvm::endianness::little);
2679 EnableWavefrontSize32 =
2680 AMDHSA_BITS_GET(KernelCodeProperties,
2681 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2682 }
2683
2684 std::string Kd;
2685 raw_string_ostream KdStream(Kd);
2686 KdStream << ".amdhsa_kernel " << KdName << '\n';
2687
2688 DataExtractor::Cursor C(0);
2689 while (C && C.tell() < Bytes.size()) {
2690 Expected<bool> Res = decodeKernelDescriptorDirective(C, Bytes, KdStream);
2691
2692 cantFail(C.takeError());
2693
2694 if (!Res)
2695 return Res;
2696 }
2697 KdStream << ".end_amdhsa_kernel\n";
2698 outs() << KdStream.str();
2699 return true;
2700}
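// Output shape sketch: a successful decode prints a block of the form
//   .amdhsa_kernel <name>
//     <one directive per recoverable descriptor field>
//   .end_amdhsa_kernel
// to stdout, while a reserved-bit violation surfaces as the Expected error
// produced by the per-range directive decoder above.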
2701
2702 Expected<bool> AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol,
2703 uint64_t &Size,
2704 ArrayRef<uint8_t> Bytes,
2705 uint64_t Address) const {
2706 // Right now only kernel descriptor needs to be handled.
2707 // We ignore all other symbols for target specific handling.
2708 // TODO:
2709 // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
2710 // Object V2 and V3 when symbols are marked protected.
2711
2712 // amd_kernel_code_t for Code Object V2.
2713 if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
2714 Size = 256;
2715 return createStringError(std::errc::invalid_argument,
2716 "code object v2 is not supported");
2717 }
2718
2719 // Code Object V3 kernel descriptors.
2720 StringRef Name = Symbol.Name;
2721 if (Symbol.Type == ELF::STT_OBJECT && Name.ends_with(StringRef(".kd"))) {
2722 Size = 64; // Size = 64 regardless of success or failure.
2723 return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
2724 }
2725
2726 return false;
2727}
2728
2729const MCExpr *AMDGPUDisassembler::createConstantSymbolExpr(StringRef Id,
2730 int64_t Val) {
2731 MCContext &Ctx = getContext();
2732 MCSymbol *Sym = Ctx.getOrCreateSymbol(Id);
2733 // Note: only set the value to Val on a new symbol, in case a disassembler
2734 // has already been initialized in this context.
2735 if (!Sym->isVariable()) {
2736 Sym->setVariableValue(MCConstantExpr::create(Val, Ctx));
2737 } else {
2738 int64_t Res = ~Val;
2739 bool Valid = Sym->getVariableValue()->evaluateAsAbsolute(Res);
2740 if (!Valid || Res != Val)
2741 Ctx.reportWarning(SMLoc(), "unsupported redefinition of " + Id);
2742 }
2743 return MCSymbolRefExpr::create(Sym, Ctx);
2744}
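// Usage sketch: symbols such as UC_VERSION_W64_BIT are bound exactly once
// per MCContext through this helper; a disassembler created later in the
// same context sees the symbol already defined, and a redefinition to a
// different value only triggers the warning instead of changing the binding.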
2745
2746//===----------------------------------------------------------------------===//
2747// AMDGPUSymbolizer
2748//===----------------------------------------------------------------------===//
2749
2750 // Try to find a symbol name for the specified label.
2751 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(
2752 MCInst &Inst, raw_ostream & /*cStream*/, int64_t Value,
2753 uint64_t /*Address*/, bool IsBranch, uint64_t /*Offset*/,
2754 uint64_t /*OpSize*/, uint64_t /*InstSize*/) {
2755
2756 if (!IsBranch) {
2757 return false;
2758 }
2759
2760 auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
2761 if (!Symbols)
2762 return false;
2763
2764 auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
2765 return Val.Addr == static_cast<uint64_t>(Value) &&
2766 Val.Type == ELF::STT_NOTYPE;
2767 });
2768 if (Result != Symbols->end()) {
2769 auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
2770 const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
2771 Inst.addOperand(MCOperand::createExpr(Add));
2772 return true;
2773 }
2774 // Add to the list of referenced addresses so the caller can synthesize a label.
2775 ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
2776 return false;
2777}
2778
2779 void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
2780 int64_t Value,
2781 uint64_t Address) {
2782 llvm_unreachable("unimplemented");
2783}
2784
2785//===----------------------------------------------------------------------===//
2786// Initialization
2787//===----------------------------------------------------------------------===//
2788
2789 static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
2790 LLVMOpInfoCallback /*GetOpInfo*/,
2791 LLVMSymbolLookupCallback /*SymbolLookUp*/,
2792 void *DisInfo,
2793 MCContext *Ctx,
2794 std::unique_ptr<MCRelocationInfo> &&RelInfo) {
2795 return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
2796}
2797
2798 static MCDisassembler *createAMDGPUDisassembler(const Target &T,
2799 const MCSubtargetInfo &STI,
2800 MCContext &Ctx) {
2801 return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
2802}
2803
2804extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
2805 LLVMInitializeAMDGPUDisassembler() {
2806 TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
2807 createAMDGPUDisassembler);
2808 TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
2809 createAMDGPUSymbolizer);
2810 }