LLVM 21.0.0git
AMDGPUMCCodeEmitter.cpp
Go to the documentation of this file.
1//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// The AMDGPU code emitter produces machine code that can be executed
11/// directly on the GPU device.
12//
13//===----------------------------------------------------------------------===//
14
17#include "SIDefines.h"
19#include "llvm/ADT/APInt.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCExpr.h"
23#include "llvm/MC/MCInstrInfo.h"
28#include <optional>
29
30using namespace llvm;
31
32namespace {
33
34class AMDGPUMCCodeEmitter : public MCCodeEmitter {
35 const MCRegisterInfo &MRI;
36 const MCInstrInfo &MCII;
37
38public:
39 AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
40 : MRI(MRI), MCII(MCII) {}
41
42 /// Encode the instruction and write it to the OS.
45 const MCSubtargetInfo &STI) const override;
46
47 void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
49 const MCSubtargetInfo &STI) const;
50
51 void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
53 const MCSubtargetInfo &STI) const;
54
55 void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
57 const MCSubtargetInfo &STI) const;
58
59 /// Use a fixup to encode the simm16 field for SOPP branch
60 /// instructions.
61 void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
63 const MCSubtargetInfo &STI) const;
64
65 void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
67 const MCSubtargetInfo &STI) const;
68
69 void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
71 const MCSubtargetInfo &STI) const;
72
73 void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
75 const MCSubtargetInfo &STI) const;
76
77 void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
79 const MCSubtargetInfo &STI) const;
80
81private:
82 uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
83 void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
84 unsigned OpNo, APInt &Op,
86 const MCSubtargetInfo &STI) const;
87
88 /// Encode an fp or int literal.
89 std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
90 const MCOperandInfo &OpInfo,
91 const MCSubtargetInfo &STI) const;
92
93 void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
94 APInt &Inst, APInt &Scratch,
95 const MCSubtargetInfo &STI) const;
96};
97
98} // end anonymous namespace
99
101 MCContext &Ctx) {
102 return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
103}
104
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  // 0..64 map onto encodings 128..192.
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  // -16..-1 map onto encodings 208..193.
  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
117
119 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
120 if (IntImm != 0)
121 return IntImm;
122
123 if (Val == 0x3800) // 0.5
124 return 240;
125
126 if (Val == 0xB800) // -0.5
127 return 241;
128
129 if (Val == 0x3C00) // 1.0
130 return 242;
131
132 if (Val == 0xBC00) // -1.0
133 return 243;
134
135 if (Val == 0x4000) // 2.0
136 return 244;
137
138 if (Val == 0xC000) // -2.0
139 return 245;
140
141 if (Val == 0x4400) // 4.0
142 return 246;
143
144 if (Val == 0xC400) // -4.0
145 return 247;
146
147 if (Val == 0x3118 && // 1.0 / (2.0 * pi)
148 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
149 return 248;
150
151 return 255;
152}
153
155 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
156 if (IntImm != 0)
157 return IntImm;
158
159 // clang-format off
160 switch (Val) {
161 case 0x3F00: return 240; // 0.5
162 case 0xBF00: return 241; // -0.5
163 case 0x3F80: return 242; // 1.0
164 case 0xBF80: return 243; // -1.0
165 case 0x4000: return 244; // 2.0
166 case 0xC000: return 245; // -2.0
167 case 0x4080: return 246; // 4.0
168 case 0xC080: return 247; // -4.0
169 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
170 default: return 255;
171 }
172 // clang-format on
173}
174
176 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
177 if (IntImm != 0)
178 return IntImm;
179
180 if (Val == llvm::bit_cast<uint32_t>(0.5f))
181 return 240;
182
183 if (Val == llvm::bit_cast<uint32_t>(-0.5f))
184 return 241;
185
186 if (Val == llvm::bit_cast<uint32_t>(1.0f))
187 return 242;
188
189 if (Val == llvm::bit_cast<uint32_t>(-1.0f))
190 return 243;
191
192 if (Val == llvm::bit_cast<uint32_t>(2.0f))
193 return 244;
194
195 if (Val == llvm::bit_cast<uint32_t>(-2.0f))
196 return 245;
197
198 if (Val == llvm::bit_cast<uint32_t>(4.0f))
199 return 246;
200
201 if (Val == llvm::bit_cast<uint32_t>(-4.0f))
202 return 247;
203
204 if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
205 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
206 return 248;
207
208 return 255;
209}
210
212 return getLit32Encoding(Val, STI);
213}
214
216 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
217 if (IntImm != 0)
218 return IntImm;
219
220 if (Val == llvm::bit_cast<uint64_t>(0.5))
221 return 240;
222
223 if (Val == llvm::bit_cast<uint64_t>(-0.5))
224 return 241;
225
226 if (Val == llvm::bit_cast<uint64_t>(1.0))
227 return 242;
228
229 if (Val == llvm::bit_cast<uint64_t>(-1.0))
230 return 243;
231
232 if (Val == llvm::bit_cast<uint64_t>(2.0))
233 return 244;
234
235 if (Val == llvm::bit_cast<uint64_t>(-2.0))
236 return 245;
237
238 if (Val == llvm::bit_cast<uint64_t>(4.0))
239 return 246;
240
241 if (Val == llvm::bit_cast<uint64_t>(-4.0))
242 return 247;
243
244 if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
245 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
246 return 248;
247
248 return 255;
249}
250
251std::optional<uint32_t>
252AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
253 const MCOperandInfo &OpInfo,
254 const MCSubtargetInfo &STI) const {
255 int64_t Imm;
256 if (MO.isExpr()) {
257 const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
258 if (!C)
259 return 255;
260
261 Imm = C->getValue();
262 } else {
263
264 assert(!MO.isDFPImm());
265
266 if (!MO.isImm())
267 return {};
268
269 Imm = MO.getImm();
270 }
271
272 switch (OpInfo.OperandType) {
285 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
286
292 return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
293
297 return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);
298
303 // FIXME Is this correct? What do inline immediates do on SI for f16 src
304 // which does not have f16 support?
305 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
306
311 // We don't actually need to check Inv2Pi here because BF16 instructions can
312 // only be emitted for targets that already support the feature.
313 return getLitBF16Encoding(static_cast<uint16_t>(Imm));
314
318 return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
319 .value_or(255);
320
324 return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
325 .value_or(255);
326
330 return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
331 .value_or(255);
332
335 return MO.getImm();
336 default:
337 llvm_unreachable("invalid operand size");
338 }
339}
340
341uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
342 using namespace AMDGPU::VOP3PEncoding;
343
344 if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel_hi)) {
345 if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2))
346 return 0;
347 if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1))
348 return OP_SEL_HI_2;
349 if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0))
350 return OP_SEL_HI_1 | OP_SEL_HI_2;
351 }
353}
354
355static bool isVCMPX64(const MCInstrDesc &Desc) {
356 return (Desc.TSFlags & SIInstrFlags::VOP3) &&
357 Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
358}
359
360void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
363 const MCSubtargetInfo &STI) const {
364 int Opcode = MI.getOpcode();
365 APInt Encoding, Scratch;
366 getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
367 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
368 unsigned bytes = Desc.getSize();
369
370 // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
371 // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
372 if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
373 Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
374 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
375 Encoding |= getImplicitOpSelHiEncoding(Opcode);
376 }
377
378 // GFX10+ v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
379 // Documentation requires dst to be encoded as EXEC (0x7E),
380 // but it looks like the actual value encoded for dst operand
381 // is ignored by HW. It was decided to define dst as "do not care"
382 // in td files to allow disassembler accept any dst value.
383 // However, dst is encoded as EXEC for compatibility with SP3.
384 if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
385 assert((Encoding & 0xFF) == 0);
386 Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
388 }
389
390 for (unsigned i = 0; i < bytes; i++) {
391 CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
392 }
393
394 // NSA encoding.
395 if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
396 int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
397 AMDGPU::OpName::vaddr0);
398 int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
399 AMDGPU::OpName::srsrc);
400 assert(vaddr0 >= 0 && srsrc > vaddr0);
401 unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
402 unsigned NumPadding = (-NumExtraAddrs) & 3;
403
404 for (unsigned i = 0; i < NumExtraAddrs; ++i) {
405 getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
406 STI);
407 CB.push_back((uint8_t)Encoding.getLimitedValue());
408 }
409 CB.append(NumPadding, 0);
410 }
411
412 if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
413 (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
414 return;
415
416 // Do not print literals from SISrc Operands for insts with mandatory literals
417 if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
418 return;
419
420 // Check for additional literals
421 for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
422
423 // Check if this operand should be encoded as [SV]Src
425 continue;
426
427 // Is this operand a literal immediate?
428 const MCOperand &Op = MI.getOperand(i);
429 auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
430 if (!Enc || *Enc != 255)
431 continue;
432
433 // Yes! Encode it
434 int64_t Imm = 0;
435
436 if (Op.isImm())
437 Imm = Op.getImm();
438 else if (Op.isExpr()) {
439 if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
440 Imm = C->getValue();
441 } else // Exprs will be replaced with a fixup value.
442 llvm_unreachable("Must be immediate or expr");
443
444 if (Desc.operands()[i].OperandType == AMDGPU::OPERAND_REG_IMM_FP64)
445 Imm = Hi_32(Imm);
446
447 support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);
448
449 // Only one literal value allowed
450 break;
451 }
452}
453
454void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
455 APInt &Op,
457 const MCSubtargetInfo &STI) const {
458 const MCOperand &MO = MI.getOperand(OpNo);
459
460 if (MO.isExpr()) {
461 const MCExpr *Expr = MO.getExpr();
463 Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
464 Op = APInt::getZero(96);
465 } else {
466 getMachineOpValue(MI, MO, Op, Fixups, STI);
467 }
468}
469
470void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
471 const MCInst &MI, unsigned OpNo, APInt &Op,
472 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
473 auto Offset = MI.getOperand(OpNo).getImm();
474 // VI only supports 20-bit unsigned offsets.
475 assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
476 Op = Offset;
477}
478
479void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
480 APInt &Op,
482 const MCSubtargetInfo &STI) const {
483 using namespace AMDGPU::SDWA;
484
485 uint64_t RegEnc = 0;
486
487 const MCOperand &MO = MI.getOperand(OpNo);
488
489 if (MO.isReg()) {
490 MCRegister Reg = MO.getReg();
491 RegEnc |= MRI.getEncodingValue(Reg);
492 RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
494 RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
495 }
496 Op = RegEnc;
497 return;
498 } else {
499 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
500 auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
501 if (Enc && *Enc != 255) {
502 Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
503 return;
504 }
505 }
506
507 llvm_unreachable("Unsupported operand kind");
508}
509
510void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
511 const MCInst &MI, unsigned OpNo, APInt &Op,
512 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
513 using namespace AMDGPU::SDWA;
514
515 uint64_t RegEnc = 0;
516
517 const MCOperand &MO = MI.getOperand(OpNo);
518
519 MCRegister Reg = MO.getReg();
520 if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
521 RegEnc |= MRI.getEncodingValue(Reg);
522 RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
523 RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
524 }
525 Op = RegEnc;
526}
527
528void AMDGPUMCCodeEmitter::getAVOperandEncoding(
529 const MCInst &MI, unsigned OpNo, APInt &Op,
530 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
531 MCRegister Reg = MI.getOperand(OpNo).getReg();
532 unsigned Enc = MRI.getEncodingValue(Reg);
533 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
534 bool IsVGPROrAGPR =
536
537 // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
538 // instructions use acc[0:1] modifier bits to distinguish. These bits are
539 // encoded as a virtual 9th bit of the register for these operands.
540 bool IsAGPR = Enc & AMDGPU::HWEncoding::IS_AGPR;
541
542 Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
543}
544
545static bool needsPCRel(const MCExpr *Expr) {
546 switch (Expr->getKind()) {
547 case MCExpr::SymbolRef: {
548 auto *SE = cast<MCSymbolRefExpr>(Expr);
549 MCSymbolRefExpr::VariantKind Kind = SE->getKind();
552 }
553 case MCExpr::Binary: {
554 auto *BE = cast<MCBinaryExpr>(Expr);
555 if (BE->getOpcode() == MCBinaryExpr::Sub)
556 return false;
557 return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
558 }
559 case MCExpr::Unary:
560 return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
561 case MCExpr::Target:
562 case MCExpr::Constant:
563 return false;
564 }
565 llvm_unreachable("invalid kind");
566}
567
568void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
569 const MCOperand &MO, APInt &Op,
571 const MCSubtargetInfo &STI) const {
572 if (MO.isReg()){
573 unsigned Enc = MRI.getEncodingValue(MO.getReg());
574 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
575 bool IsVGPROrAGPR =
577 Op = Idx | (IsVGPROrAGPR << 8);
578 return;
579 }
580 unsigned OpNo = &MO - MI.begin();
581 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
582}
583
584void AMDGPUMCCodeEmitter::getMachineOpValueT16(
585 const MCInst &MI, unsigned OpNo, APInt &Op,
586 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
587 const MCOperand &MO = MI.getOperand(OpNo);
588 if (MO.isReg()) {
589 unsigned Enc = MRI.getEncodingValue(MO.getReg());
590 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
591 bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
592 Op = Idx | (IsVGPR << 8);
593 return;
594 }
595 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
596 // VGPRs include the suffix/op_sel bit in the register encoding, but
597 // immediates and SGPRs include it in src_modifiers. Therefore, copy the
598 // op_sel bit from the src operands into src_modifier operands if Op is
599 // src_modifiers and the corresponding src is a VGPR
600 int SrcMOIdx = -1;
601 assert(OpNo < INT_MAX);
602 if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI.getOpcode(),
603 AMDGPU::OpName::src0_modifiers)) {
604 SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
605 int VDstMOIdx =
606 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
607 if (VDstMOIdx != -1) {
608 auto DstReg = MI.getOperand(VDstMOIdx).getReg();
609 if (AMDGPU::isHi16Reg(DstReg, MRI))
611 }
612 } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
613 MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
614 SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
615 else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
616 MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
617 SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
618 if (SrcMOIdx == -1)
619 return;
620
621 const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
622 if (!SrcMO.isReg())
623 return;
624 auto SrcReg = SrcMO.getReg();
625 if (AMDGPU::isSGPR(SrcReg, &MRI))
626 return;
627 if (AMDGPU::isHi16Reg(SrcReg, MRI))
629}
630
631void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
632 const MCInst &MI, unsigned OpNo, APInt &Op,
633 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
634 const MCOperand &MO = MI.getOperand(OpNo);
635 if (MO.isReg()) {
636 uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
637 unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
638 bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
639 bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR;
640 assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
641 Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
642 return;
643 }
644 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
645}
646
647void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
648 const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
649 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
650 int64_t Val;
651 if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
652 Op = Val;
653 return;
654 }
655
656 if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
657 // FIXME: If this is expression is PCRel or not should not depend on what
658 // the expression looks like. Given that this is just a general expression,
659 // it should probably be FK_Data_4 and whatever is producing
660 //
661 // s_add_u32 s2, s2, (extern_const_addrspace+16
662 //
663 // And expecting a PCRel should instead produce
664 //
665 // .Ltmp1:
666 // s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
668 if (needsPCRel(MO.getExpr()))
670 else
671 Kind = FK_Data_4;
672
673 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
674 uint32_t Offset = Desc.getSize();
675 assert(Offset == 4 || Offset == 8);
676
677 Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
678 }
679
680 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
681 if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
682 if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
683 Op = *Enc;
684 return;
685 }
686 } else if (MO.isImm()) {
687 Op = MO.getImm();
688 return;
689 }
690
691 llvm_unreachable("Encoding of this operand type is not supported yet.");
692}
693
694#include "AMDGPUGenMCCodeEmitter.inc"
unsigned const MachineRegisterInfo * MRI
static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI)
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI)
static uint32_t getLitBF16Encoding(uint16_t Val)
static bool isVCMPX64(const MCInstrDesc &Desc)
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getIntInlineImmEncoding(IntTy Imm)
static bool needsPCRel(const MCExpr *Expr)
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI)
Provides AMDGPU specific target descriptions.
This file implements a class to represent arbitrary precision integral constant values and operations...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
IRTranslator LLVM IR MI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
Definition: APInt.h:78
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
Definition: APInt.cpp:493
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:475
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:200
This class represents an Operation in the Expression.
@ Sub
Subtraction.
Definition: MCExpr.h:518
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
Context object for machine code objects.
Definition: MCContext.h:83
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:414
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
@ Unary
Unary expressions.
Definition: MCExpr.h:40
@ Constant
Constant expressions.
Definition: MCExpr.h:38
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:39
@ Target
Target specific expression.
Definition: MCExpr.h:41
@ Binary
Binary expressions.
Definition: MCExpr.h:37
ExprKind getKind() const
Definition: MCExpr.h:78
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Definition: MCFixup.h:87
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:185
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:37
int64_t getImm() const
Definition: MCInst.h:81
bool isImm() const
Definition: MCInst.h:63
bool isReg() const
Definition: MCInst.h:62
MCRegister getReg() const
Returns the register number.
Definition: MCInst.h:70
bool isDFPImm() const
Definition: MCInst.h:65
const MCExpr * getExpr() const
Definition: MCInst.h:115
bool isExpr() const
Definition: MCInst.h:66
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
@ fixup_si_sopp_br
16-bit PC relative fixup for SOPP branch instructions.
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this an AMDGPU specific source operand? These include registers, inline constants,...
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition: SIDefines.h:234
@ OPERAND_REG_IMM_INT64
Definition: SIDefines.h:201
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:211
@ OPERAND_REG_INLINE_C_V2INT32
Definition: SIDefines.h:227
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:223
@ OPERAND_REG_INLINE_C_BF16
Definition: SIDefines.h:220
@ OPERAND_REG_INLINE_C_V2BF16
Definition: SIDefines.h:225
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:212
@ OPERAND_REG_IMM_BF16
Definition: SIDefines.h:205
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:246
@ OPERAND_REG_IMM_INT32
Operands with register or 32-bit immediate.
Definition: SIDefines.h:200
@ OPERAND_REG_IMM_V2BF16
Definition: SIDefines.h:210
@ OPERAND_REG_IMM_BF16_DEFERRED
Definition: SIDefines.h:207
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:206
@ OPERAND_REG_INLINE_C_INT64
Definition: SIDefines.h:219
@ OPERAND_REG_INLINE_AC_BF16
Definition: SIDefines.h:240
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition: SIDefines.h:217
@ OPERAND_REG_INLINE_AC_INT16
Operands with an AccVGPR register or inline constant.
Definition: SIDefines.h:238
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:204
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:226
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:244
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:241
@ OPERAND_REG_INLINE_AC_INT32
Definition: SIDefines.h:239
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:242
@ OPERAND_REG_INLINE_AC_V2BF16
Definition: SIDefines.h:245
@ OPERAND_REG_IMM_V2INT32
Definition: SIDefines.h:213
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:203
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:222
@ OPERAND_REG_INLINE_C_INT32
Definition: SIDefines.h:218
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:224
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:214
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:243
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:221
@ OPERAND_REG_IMM_INT16
Definition: SIDefines.h:202
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:228
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition: SIDefines.h:231
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:209
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:208
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
bool isVI(const MCSubtargetInfo &STI)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition: MathExtras.h:155
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
MCCodeEmitter * createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
Description of the encoding of one expression Op.