21 for (
auto Instr : Res) {
23 bool Compressed =
false;
24 switch (Instr.getOpcode()) {
36 Compressed = isInt<6>(Instr.getImm());
55 bool IsRV64 = STI.
hasFeature(RISCV::Feature64Bit);
59 (!isInt<32>(Val) || Val == 0x800)) {
64 if (!IsRV64 && STI.
hasFeature(RISCV::FeatureVendorXqcili)) {
65 bool FitsOneStandardInst = ((Val & 0xFFF) == 0) || isInt<12>(Val);
69 if (!FitsOneStandardInst && isInt<20>(Val)) {
76 if (!FitsOneStandardInst && isInt<32>(Val)) {
90 int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
91 int64_t Lo12 = SignExtend64<12>(Val);
96 if (Lo12 || Hi20 == 0) {
97 unsigned AddiOpc = RISCV::ADDI;
102 int64_t LuiRes = SignExtend64<32>(Hi20 << 12);
103 if (!isInt<32>(LuiRes + Lo12))
104 AddiOpc = RISCV::ADDIW;
111 assert(IsRV64 &&
"Can't emit >32-bit imm for non-RV64 target");
136 int64_t Lo12 = SignExtend64<12>(Val);
143 if (!isInt<32>(Val)) {
150 if (ShiftAmount > 12 && !isInt<12>(Val)) {
151 if (isInt<32>((
uint64_t)Val << 12)) {
156 }
else if (isUInt<32>((
uint64_t)Val << 12) &&
161 Val = ((
uint64_t)Val << 12) | (0xffffffffull << 32);
167 if (isUInt<32>(Val) && !isInt<32>(Val) &&
171 Val = ((
uint64_t)Val) | (0xffffffffull << 32);
180 unsigned Opc =
Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
192 if (TrailingOnes > 0 && TrailingOnes < 64 &&
193 (LeadingOnes + TrailingOnes) > (64 - 12))
194 return 64 - TrailingOnes;
199 if (UpperTrailingOnes < 32 &&
200 (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
201 return 32 - UpperTrailingOnes;
208 assert(Val > 0 &&
"Expected positive val");
215 ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
221 if ((TmpSeq.
size() + 1) < Res.
size() ||
228 ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
233 if ((TmpSeq.
size() + 1) < Res.
size() ||
241 if (LeadingZeros == 32 && STI.
hasFeature(RISCV::FeatureStdExtZba)) {
243 uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
248 if ((TmpSeq.
size() + 1) < Res.
size() ||
264 if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.
size() >= 2) {
266 int64_t ShiftedVal = Val >> TrailingZeros;
271 bool IsShiftedCompressible =
272 isInt<6>(ShiftedVal) && !STI.
hasFeature(RISCV::TuneLUIADDIFusion);
277 if ((TmpSeq.
size() + 1) < Res.
size() || IsShiftedCompressible) {
289 "Expected RV32 to only need 2 instructions");
296 if ((Val & 0xfff) != 0 && (Val & 0x1800) == 0x1000) {
297 int64_t Imm12 = -(0x800 - (Val & 0xfff));
298 int64_t AdjustedVal = Val - Imm12;
303 if ((TmpSeq.
size() + 1) < Res.
size()) {
311 if (Val > 0 && Res.
size() > 2) {
317 if (Val < 0 && Res.
size() > 3) {
333 int64_t LoVal = SignExtend64<32>(Val);
334 int64_t HiVal = SignExtend64<32>(Val >> 32);
335 if (LoVal == HiVal) {
338 if ((TmpSeq.
size() + 1) < Res.
size()) {
366 if (Res[0].
getOpcode() == RISCV::ADDI && Res[0].getImm() == 1 &&
367 Res[1].getOpcode() == RISCV::SLLI) {
400 if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
403 }
else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
406 }
else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
413 if ((TmpSeq.
size() + 1) < Res.
size()) {
419 int64_t Hi52 = ((
uint64_t)Val + 0x800ull) & ~0xfffull;
420 int64_t Lo12 = SignExtend64<12>(Val);
422 if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
425 }
else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
428 }
else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
437 "unexpected instruction sequence for immediate materialisation");
440 if ((TmpSeq.
size() + 2) < Res.
size()) {
452 STI.
hasFeature(RISCV::FeatureVendorXTHeadBb))) {
455 uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
456 assert(isInt<12>(NegImm12));
506 unsigned &ShiftAmt,
unsigned &AddOpc) {
507 int64_t LoVal = SignExtend64<32>(Val);
521 assert(TzLo < 32 && TzHi >= 32);
522 ShiftAmt = TzHi - TzLo;
525 if (Tmp == ((
uint64_t)LoVal << ShiftAmt))
531 AddOpc = RISCV::ADD_UW;
539 bool CompressionCost,
bool FreeZeroes) {
540 bool IsRV64 = STI.
hasFeature(RISCV::Feature64Bit);
542 int PlatRegSize = IsRV64 ? 64 : 32;
547 for (
unsigned ShiftVal = 0; ShiftVal <
Size; ShiftVal += PlatRegSize) {
554 return std::max(FreeZeroes ? 0 : 1,
Cost);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations on them.
static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI, RISCVMatInt::InstSeq &Res)
static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI, RISCVMatInt::InstSeq &Res)
static unsigned extractRotateInfo(int64_t Val)
static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
int64_t getSExtValue() const
Get sign extended value.
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Wrapper class representing physical registers. Should be passed by value.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
unsigned getOpcode() const
OpndKind getOpndKind() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element type.
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost, bool FreeZeroes)
SmallVector< Inst, 8 > InstSeq
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI, MCRegister DestReg, SmallVectorImpl< MCInst > &Insts)
This is an optimization pass for GlobalISel generic memory operations.
int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
int countl_one(T Value)
Count the number of ones from the most significant bit to the first zero bit.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.