// AMDGPUMCCodeEmitter.cpp (excerpt): encodes AMDGPU MCInsts into machine code
// bytes and fixups.

// Declarations from the AMDGPUMCCodeEmitter class body:

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  // ...
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  // ...
  std::optional<uint64_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI,
                                         bool HasMandatoryLiteral = false) const;
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
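// Illustration (editor's note, not in the original source): the function above
// reproduces the hardware's integer inline-constant table, e.g.:
//   getIntInlineImmEncoding<int32_t>(0)   -> 128
//   getIntInlineImmEncoding<int32_t>(64)  -> 192
//   getIntInlineImmEncoding<int32_t>(-1)  -> 193
//   getIntInlineImmEncoding<int32_t>(-16) -> 208
// A return of 0 means "no inline form"; callers then try the floating-point
// inline constants below and finally fall back to a trailing literal (255).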
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  // ... (integer and fp16 inline-constant checks elided)
  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

static uint32_t getLitBF16Encoding(uint16_t Val) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  switch (Val) {
  case 0x3F00: return 240; // 0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; // 1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; // 2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; // 4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; // 1.0 / (2.0 * pi)
  default:     return 255;
  }
}
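// Sanity sketch (editor's illustration, not in the original source): bfloat16
// is the upper half of an IEEE-754 binary32, so the case labels above can be
// derived by truncating the float bit patterns:
static_assert((0x3F800000u >> 16) == 0x3F80u, "1.0f as bf16");  // case 242
static_assert((0xC0000000u >> 16) == 0xC000u, "-2.0f as bf16"); // case 245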
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint32_t>(0.5f))  return 240;
  if (Val == llvm::bit_cast<uint32_t>(-0.5f)) return 241;
  if (Val == llvm::bit_cast<uint32_t>(1.0f))  return 242;
  if (Val == llvm::bit_cast<uint32_t>(-1.0f)) return 243;
  if (Val == llvm::bit_cast<uint32_t>(2.0f))  return 244;
  if (Val == llvm::bit_cast<uint32_t>(-2.0f)) return 245;
  if (Val == llvm::bit_cast<uint32_t>(4.0f))  return 246;
  if (Val == llvm::bit_cast<uint32_t>(-4.0f)) return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
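// Editor's note (not in the original source): 0x3e22f983 is the binary32 bit
// pattern of 1.0 / (2.0 * pi) ~= 0.15915494f, an extra inline constant that
// exists only on subtargets with FeatureInv2PiInlineImm. Anything not matched
// above falls through to 255, the "32-bit literal follows" encoding.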
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI,
                                 bool IsFP) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint64_t>(0.5))  return 240;
  if (Val == llvm::bit_cast<uint64_t>(-0.5)) return 241;
  if (Val == llvm::bit_cast<uint64_t>(1.0))  return 242;
  if (Val == llvm::bit_cast<uint64_t>(-1.0)) return 243;
  if (Val == llvm::bit_cast<uint64_t>(2.0))  return 244;
  if (Val == llvm::bit_cast<uint64_t>(-2.0)) return 245;
  if (Val == llvm::bit_cast<uint64_t>(4.0))  return 246;
  if (Val == llvm::bit_cast<uint64_t>(-4.0)) return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  if (IsFP)
    return STI.hasFeature(AMDGPU::Feature64BitLiterals) && Lo_32(Val) ? 254
                                                                      : 255;

  return STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
                 (!isInt<32>(Val) || !isUInt<32>(Val))
             ? 254
             : 255;
}
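// Editor's note (not in the original source): the two tails differ because a
// 32-bit literal fills the high dword of a 64-bit FP operand but is extended
// for a 64-bit integer operand. So an FP value needs the 64-bit "lit64" slot
// (254) exactly when its low dword is nonzero, while an integer needs it only
// when the value is ambiguous under extension, i.e. does not fit in both
// int32 and uint32. Without Feature64BitLiterals both cases fall back to 255.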
std::optional<uint64_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                    const MCOperandInfo &OpInfo,
                                    const MCSubtargetInfo &STI,
                                    bool HasMandatoryLiteral) const {
  int64_t Imm = 0;
  if (MO.isExpr()) {
    if (!MO.getExpr()->evaluateAsAbsolute(Imm))
      return 255;
  }
  // ... (otherwise Imm is taken from the operand itself; the function then
  // switches on OpInfo.OperandType to pick one of the getLit*Encoding
  // helpers. For 64-bit FP operand types, a mandatory literal upgrades the
  // plain literal encoding to lit64:)
  auto Enc = getLit64Encoding(static_cast<uint64_t>(Imm), STI, /*IsFP=*/true);
  return (HasMandatoryLiteral && Enc == 255) ? 254 : Enc;
}
uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  // ... (returns the op_sel_hi bits that must be set to 1 for source
  // operands the instruction does not actually have)
}

static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}
void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  if (((Desc.TSFlags & SIInstrFlags::VOP3P) ||
       Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
       Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) &&
      true /* ... second conjunct elided in this excerpt ... */)
    Encoding |= getImplicitOpSelHiEncoding(Opcode);

  // GFX10+ v_cmpx instructions promoted to VOP3 have an implied EXEC
  // destination that still has to be encoded.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                AMDGPU::HWEncoding::REG_IDX_MASK;
  }

  for (unsigned i = 0; i < bytes; i++)
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  // NSA encoding: extra MIMG address operands after vaddr0 are emitted as
  // trailing bytes, padded out to a whole dword.
  if (AMDGPU::isGFX10Plus(STI) && (Desc.TSFlags & SIInstrFlags::MIMG)) {
    int vaddr0 =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int srsrc =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3; // round up to a multiple of 4

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }
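  // Worked example (editor's note, not in the original source): with five
  // extra address registers, NumPadding = (-5) & 3 == 3, so five address
  // bytes plus three zero bytes are appended, rounding the NSA block up to
  // two full dwords.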
  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || (*Enc != 255 && *Enc != 254))
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;
    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    }
    // ... (the literal dword or dwords are then appended to CB)
  }
}
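// Editor's note (not in the original source): 255 ("32-bit literal follows")
// and 254 ("64-bit lit64 literal follows") are the only source encodings that
// require literal dwords after the instruction words, which is why every
// other encoding is skipped by the loop above.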
void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    // Unresolved branch targets become 16-bit PC-relative fixups.
    addFixup(Fixups, 0, MO.getExpr(), AMDGPU::fixup_si_sopp_br,
             /*PCRel=*/true);
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}
void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}
void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    MCRegister Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI))
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    Op = RegEnc;
    return;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
  if (Enc && *Enc != 255) {
    Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
    return;
  }

  llvm_unreachable("Unsupported operand kind");
}
void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;
  MCRegister Reg = MI.getOperand(OpNo).getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}
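// Editor's note (not in the original source): VCC is the implicit VOPC
// destination and is encoded as all zeros; only an explicit SGPR destination
// sets VOPC_DST_VCC_MASK together with its masked register index.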
void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  MCRegister Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;

  // VGPRs and AGPRs share encodings; a virtual ninth bit distinguishes AGPRs
  // for the operands that need it.
  bool IsAGPR = false; // ... (register-class membership checks elided)

  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}
static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    // ... (true unless SE carries an absolute, e.g. abs32-style, specifier)
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    // ... (a difference of symbols is absolute; otherwise recurse into BE)
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  // ...
  }
}
void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
    Op = Idx | (IsVGPROrAGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);

  // If this is a *_modifiers operand, fold the op_sel bits for 16-bit hi-half
  // registers into the modifiers encoding.
  int SrcMOIdx = -1;
  if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                       MI.getOpcode(), AMDGPU::OpName::src0_modifiers)) {
    SrcMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      if (AMDGPU::isHi16Reg(DstReg, MRI))
        Op |= SISrcMods::DST_OP_SEL;
    }
  } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                 MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
    SrcMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
               MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
    SrcMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
  else
    return;

  if (SrcMOIdx == -1)
    return;

  const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
  if (!SrcMO.isReg())
    return;
  auto SrcReg = SrcMO.getReg();
  if (AMDGPU::isHi16Reg(SrcReg, MRI))
    Op |= SISrcMods::OP_SEL_0;
}
void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
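// Editor's note (not in the original source): the 9-bit operand built above
// is laid out as Op[8] = VGPR flag (0x100), Op[7] = hi-half flag (0x80), and
// Op[6:0] = register index, which is why the assert restricts VGPRs to
// VGPR0-VGPR127.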
void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  int64_t Val;
  bool isLikeImm = false;

  if (MO.isImm()) {
    Val = MO.getImm();
    isLikeImm = true;
  } else if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
    isLikeImm = true;
  }
  // ... (non-constant expressions are emitted as data fixups instead)

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  // Mandatory literals are flagged by a named immediate operand.
  bool HasMandatoryLiteral =
      AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm);
  if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI,
                                HasMandatoryLiteral)) {
    Op = *Enc;
    return;
  }
  // ...
}
721#include "AMDGPUGenMCCodeEmitter.inc"