55 void getMachineOpValueT16Lo128(
const MCInst &
MI,
unsigned OpNo,
APInt &
Op,
82 uint64_t getImplicitOpSelHiEncoding(
int Opcode)
const;
89 std::optional<uint32_t> getLitEncoding(
const MCOperand &MO,
107template <
typename IntTy>
109 if (Imm >= 0 && Imm <= 64)
112 if (Imm >= -16 && Imm <= -1)
113 return 192 + std::abs(Imm);
148 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
161 case 0x3F00:
return 240;
162 case 0xBF00:
return 241;
163 case 0x3F80:
return 242;
164 case 0xBF80:
return 243;
165 case 0x4000:
return 244;
166 case 0xC000:
return 245;
167 case 0x4080:
return 246;
168 case 0xC080:
return 247;
169 case 0x3E22:
return 248;
180 if (Val == llvm::bit_cast<uint32_t>(0.5f))
183 if (Val == llvm::bit_cast<uint32_t>(-0.5f))
186 if (Val == llvm::bit_cast<uint32_t>(1.0f))
189 if (Val == llvm::bit_cast<uint32_t>(-1.0f))
192 if (Val == llvm::bit_cast<uint32_t>(2.0f))
195 if (Val == llvm::bit_cast<uint32_t>(-2.0f))
198 if (Val == llvm::bit_cast<uint32_t>(4.0f))
201 if (Val == llvm::bit_cast<uint32_t>(-4.0f))
204 if (Val == 0x3e22f983 &&
205 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
220 if (Val == llvm::bit_cast<uint64_t>(0.5))
223 if (Val == llvm::bit_cast<uint64_t>(-0.5))
226 if (Val == llvm::bit_cast<uint64_t>(1.0))
229 if (Val == llvm::bit_cast<uint64_t>(-1.0))
232 if (Val == llvm::bit_cast<uint64_t>(2.0))
235 if (Val == llvm::bit_cast<uint64_t>(-2.0))
238 if (Val == llvm::bit_cast<uint64_t>(4.0))
241 if (Val == llvm::bit_cast<uint64_t>(-4.0))
244 if (Val == 0x3fc45f306dc9c882 &&
245 STI.
hasFeature(AMDGPU::FeatureInv2PiInlineImm))
251std::optional<uint32_t>
252AMDGPUMCCodeEmitter::getLitEncoding(
const MCOperand &MO,
257 const auto *
C = dyn_cast<MCConstantExpr>(MO.
getExpr());
341uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(
int Opcode)
const {
342 using namespace AMDGPU::VOP3PEncoding;
357 Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
360void AMDGPUMCCodeEmitter::encodeInstruction(
const MCInst &
MI,
364 int Opcode =
MI.getOpcode();
365 APInt Encoding, Scratch;
366 getBinaryCodeForInstr(
MI, Fixups, Encoding, Scratch, STI);
368 unsigned bytes =
Desc.getSize();
373 Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
374 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
375 Encoding |= getImplicitOpSelHiEncoding(Opcode);
385 assert((Encoding & 0xFF) == 0);
386 Encoding |=
MRI.getEncodingValue(AMDGPU::EXEC_LO) &
390 for (
unsigned i = 0; i < bytes; i++) {
396 int vaddr0 = AMDGPU::getNamedOperandIdx(
MI.getOpcode(),
397 AMDGPU::OpName::vaddr0);
398 int srsrc = AMDGPU::getNamedOperandIdx(
MI.getOpcode(),
399 AMDGPU::OpName::srsrc);
400 assert(vaddr0 >= 0 && srsrc > vaddr0);
401 unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
402 unsigned NumPadding = (-NumExtraAddrs) & 3;
404 for (
unsigned i = 0; i < NumExtraAddrs; ++i) {
405 getMachineOpValue(
MI,
MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
412 if ((bytes > 8 && STI.
hasFeature(AMDGPU::FeatureVOP3Literal)) ||
413 (bytes > 4 && !STI.
hasFeature(AMDGPU::FeatureVOP3Literal)))
421 for (
unsigned i = 0, e =
Desc.getNumOperands(); i < e; ++i) {
429 auto Enc = getLitEncoding(
Op,
Desc.operands()[i], STI);
430 if (!Enc || *Enc != 255)
438 else if (
Op.isExpr()) {
439 if (
const auto *
C = dyn_cast<MCConstantExpr>(
Op.getExpr()))
454void AMDGPUMCCodeEmitter::getSOPPBrEncoding(
const MCInst &
MI,
unsigned OpNo,
466 getMachineOpValue(
MI, MO,
Op, Fixups, STI);
470void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
473 auto Offset =
MI.getOperand(OpNo).getImm();
479void AMDGPUMCCodeEmitter::getSDWASrcEncoding(
const MCInst &
MI,
unsigned OpNo,
483 using namespace AMDGPU::SDWA;
491 RegEnc |=
MRI.getEncodingValue(Reg);
492 RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
494 RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
500 auto Enc = getLitEncoding(MO,
Desc.operands()[OpNo], STI);
501 if (Enc && *Enc != 255) {
502 Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
510void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
513 using namespace AMDGPU::SDWA;
520 if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
521 RegEnc |=
MRI.getEncodingValue(Reg);
522 RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
523 RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
528void AMDGPUMCCodeEmitter::getAVOperandEncoding(
532 unsigned Enc =
MRI.getEncodingValue(Reg);
542 Op =
Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
548 auto *SE = cast<MCSymbolRefExpr>(Expr);
554 auto *BE = cast<MCBinaryExpr>(Expr);
560 return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
568void AMDGPUMCCodeEmitter::getMachineOpValue(
const MCInst &
MI,
573 unsigned Enc =
MRI.getEncodingValue(MO.
getReg());
577 Op =
Idx | (IsVGPROrAGPR << 8);
580 unsigned OpNo = &MO -
MI.begin();
581 getMachineOpValueCommon(
MI, MO, OpNo,
Op, Fixups, STI);
584void AMDGPUMCCodeEmitter::getMachineOpValueT16(
589 unsigned Enc =
MRI.getEncodingValue(MO.
getReg());
592 Op =
Idx | (IsVGPR << 8);
595 getMachineOpValueCommon(
MI, MO, OpNo,
Op, Fixups, STI);
602 if ((
int)OpNo == AMDGPU::getNamedOperandIdx(
MI.getOpcode(),
603 AMDGPU::OpName::src0_modifiers)) {
604 SrcMOIdx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::src0);
606 AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::vdst);
607 if (VDstMOIdx != -1) {
608 auto DstReg =
MI.getOperand(VDstMOIdx).getReg();
612 }
else if ((
int)OpNo == AMDGPU::getNamedOperandIdx(
613 MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
614 SrcMOIdx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::src1);
615 else if ((
int)OpNo == AMDGPU::getNamedOperandIdx(
616 MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
617 SrcMOIdx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(), AMDGPU::OpName::src2);
624 auto SrcReg = SrcMO.
getReg();
631void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
640 assert((!IsVGPR || isUInt<7>(RegIdx)) &&
"VGPR0-VGPR127 expected!");
641 Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
644 getMachineOpValueCommon(
MI, MO, OpNo,
Op, Fixups, STI);
647void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
682 if (
auto Enc = getLitEncoding(MO,
Desc.operands()[OpNo], STI)) {
686 }
else if (MO.
isImm()) {
694#include "AMDGPUGenMCCodeEmitter.inc"
unsigned const MachineRegisterInfo * MRI
static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI)
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI)
static uint32_t getLitBF16Encoding(uint16_t Val)
static bool isVCMPX64(const MCInstrDesc &Desc)
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getIntInlineImmEncoding(IntTy Imm)
static bool needsPCRel(const MCExpr *Expr)
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI)
Provides AMDGPU specific target descriptions.
This file implements a class to represent arbitrary precision integral constant values and operations...
Returns the subtype a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it; otherwise, return the limit value.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents an Operation in the Expression.
MCCodeEmitter - Generic instruction encoding interface.
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
Context object for machine code objects.
const MCRegisterInfo * getRegisterInfo() const
Base class for the full range of assembler expressions which are needed for parsing.
@ Unary
Unary expressions.
@ Constant
Constant expressions.
@ SymbolRef
References to labels and assigned expressions.
@ Target
Target specific expression.
@ Binary
Binary expressions.
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
Interface to description of machine instruction set.
This holds information about one operand of a machine instruction, indicating the register class for ...
uint8_t OperandType
Information about the type of the operand.
Instances of this class represent operands of the MCInst class.
MCRegister getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
@ fixup_si_sopp_br
16-bit PC relative fixup for SOPP branch instructions.
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this an AMDGPU specific source operand? These include registers, inline constants,...
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
@ OPERAND_REG_INLINE_C_V2INT32
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_INLINE_AC_V2FP16
@ OPERAND_REG_IMM_INT32
Operands with register or 32-bit immediate.
@ OPERAND_REG_IMM_BF16_DEFERRED
@ OPERAND_REG_INLINE_C_INT64
@ OPERAND_REG_INLINE_AC_BF16
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
@ OPERAND_REG_INLINE_AC_INT16
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_V2INT16
@ OPERAND_REG_INLINE_AC_FP16
@ OPERAND_REG_INLINE_AC_INT32
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_INLINE_AC_V2BF16
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_REG_INLINE_C_V2FP32
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
@ OPERAND_REG_IMM_FP32_DEFERRED
@ OPERAND_REG_IMM_FP16_DEFERRED
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
bool isVI(const MCSubtargetInfo &STI)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
@ C
The default llvm calling convention, compatible with C.
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
MCFixupKind
Extensible enumeration to represent the type of a fixup.
@ FK_PCRel_4
A four-byte pc relative fixup.
@ FK_Data_4
A four-byte fixup.
MCCodeEmitter * createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
Description of the encoding of one expression Op.