LLVM 22.0.0git
RISCVInstructionSelector.cpp
Go to the documentation of this file.
1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
16#include "RISCVSubtarget.h"
17#include "RISCVTargetMachine.h"
25#include "llvm/IR/IntrinsicsRISCV.h"
26#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
// GlobalISel instruction selector for RISC-V. Combines the tblgen-generated
// selectImpl() with hand-written C++ selection for the cases TableGen cannot
// express yet (see the \todo in the file header).
// NOTE(review): several declaration lines appear to have been dropped by the
// extraction of this file (e.g. the opening line of setupMF around "47", the
// return type of getRegClassForTypeOnBank at "57", and a declaration at "94",
// presumably selectUnmergeValues) -- confirm against the upstream source.
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
// Cache MachineRegisterInfo per-function; most helpers below read *MRI.
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
58 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
59
// Bound on the recursion in hasAllNBitUsers to keep use-chain walks cheap.
60 static constexpr unsigned MaxRecursionDepth = 6;
61
62 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
63 const unsigned Depth = 0) const;
// Convenience wrappers: all users only read the low 16 / 32 bits.
64 bool hasAllHUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, 16);
66 }
67 bool hasAllWUsers(const MachineInstr &MI) const {
68 return hasAllNBitUsers(MI, 32);
69 }
70
71 bool isRegInGprb(Register Reg) const;
72 bool isRegInFprb(Register Reg) const;
73
74 // tblgen-erated 'select' implementation, used as the initial selector for
75 // the patterns that don't require complex C++.
76 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
77
78 // A lowering phase that runs before any selection attempts.
79 // Returns true if the instruction was modified.
80 void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
81
82 bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
83
84 // Custom selection methods
85 bool selectCopy(MachineInstr &MI) const;
86 bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
87 bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
88 bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
89 bool IsExternWeak = false) const;
90 bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
91 bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
92 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
93 MachineIRBuilder &MIB) const;
95
// ComplexPattern handlers referenced from the generated matcher tables.
96 ComplexRendererFns selectShiftMask(MachineOperand &Root,
97 unsigned ShiftWidth) const;
98 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
99 return selectShiftMask(Root, STI.getXLen());
100 }
101 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
102 return selectShiftMask(Root, 32);
103 }
104 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
105
106 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
107 template <unsigned Bits>
108 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
109 return selectSExtBits(Root, Bits);
110 }
111
112 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
113 template <unsigned Bits>
114 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
115 return selectZExtBits(Root, Bits);
116 }
117
118 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
119 template <unsigned ShAmt>
120 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
121 return selectSHXADDOp(Root, ShAmt);
122 }
123
124 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
125 unsigned ShAmt) const;
126 template <unsigned ShAmt>
127 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
128 return selectSHXADD_UWOp(Root, ShAmt);
129 }
130
131 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
132
133 // Custom renderers for tablegen
134 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
135 int OpIdx) const;
136 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
137 int OpIdx) const;
138 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
139 int OpIdx) const;
140 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
141 int OpIdx) const;
142 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
143 int OpIdx) const;
144
145 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
146 int OpIdx) const;
147 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
148 const MachineInstr &MI, int OpIdx) const;
149
150 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
151 int OpIdx) const;
152 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
153 int OpIdx) const;
154
155 const RISCVSubtarget &STI;
156 const RISCVInstrInfo &TII;
157 const RISCVRegisterInfo &TRI;
158 const RISCVRegisterBankInfo &RBI;
159 const RISCVTargetMachine &TM;
160
// Valid only between setupMF() calls; null before the first function.
161 MachineRegisterInfo *MRI = nullptr;
162
163 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
164 // uses "STI." in the code generated by TableGen. We need to unify the name of
165 // Subtarget variable.
166 const RISCVSubtarget *Subtarget = &STI;
167
168#define GET_GLOBALISEL_PREDICATES_DECL
169#include "RISCVGenGlobalISel.inc"
170#undef GET_GLOBALISEL_PREDICATES_DECL
171
172#define GET_GLOBALISEL_TEMPORARIES_DECL
173#include "RISCVGenGlobalISel.inc"
174#undef GET_GLOBALISEL_TEMPORARIES_DECL
175};
176
177} // end anonymous namespace
178
179#define GET_GLOBALISEL_IMPL
180#include "RISCVGenGlobalISel.inc"
181#undef GET_GLOBALISEL_IMPL
182
// Constructor: caches the subtarget's instruction/register info and runs the
// tblgen-generated predicate/temporary initializers from the .inc file.
183RISCVInstructionSelector::RISCVInstructionSelector(
184 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
185 const RISCVRegisterBankInfo &RBI)
186 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
187 TM(TM),
188
// NOTE(review): the GET_GLOBALISEL_PREDICATES_INIT / GET_GLOBALISEL_TEMPORARIES_INIT
// #define/#undef lines that normally bracket these two includes appear to be
// missing from this extraction -- confirm against the upstream file.
190#include "RISCVGenGlobalISel.inc"
193#include "RISCVGenGlobalISel.inc"
195{
196}
197
198// Mimics optimizations in ISel and RISCVOptWInst Pass
// Returns true if every (non-debug) user of MI's result only reads the low
// `Bits` bits, walking through bit-preserving users up to MaxRecursionDepth.
// Mimics optimizations in ISel and RISCVOptWInst Pass
199bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
200 unsigned Bits,
201 const unsigned Depth) const {
202
// At depth 0 the query only makes sense for these narrowable generic ops;
// recursive calls (Depth != 0) may start from already-selected RISCV::* ops.
203 assert((MI.getOpcode() == TargetOpcode::G_ADD ||
204 MI.getOpcode() == TargetOpcode::G_SUB ||
205 MI.getOpcode() == TargetOpcode::G_MUL ||
206 MI.getOpcode() == TargetOpcode::G_SHL ||
207 MI.getOpcode() == TargetOpcode::G_LSHR ||
208 MI.getOpcode() == TargetOpcode::G_AND ||
209 MI.getOpcode() == TargetOpcode::G_OR ||
210 MI.getOpcode() == TargetOpcode::G_XOR ||
211 MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
212 "Unexpected opcode");
213
// Conservatively give up on deep use chains.
214 if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
215 return false;
216
217 auto DestReg = MI.getOperand(0).getReg();
// Every user must demand no more than the low `Bits` bits; any unknown user
// (default case) makes the whole query fail.
218 for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
219 assert(UserOp.getParent() && "UserOp must have a parent");
220 const MachineInstr &UserMI = *UserOp.getParent();
221 unsigned OpIdx = UserOp.getOperandNo();
222
223 switch (UserMI.getOpcode()) {
224 default:
225 return false;
// W-form ops only read the low 32 bits of their register inputs.
226 case RISCV::ADDW:
227 case RISCV::ADDIW:
228 case RISCV::SUBW:
229 if (Bits >= 32)
230 break;
231 return false;
232 case RISCV::SLL:
233 case RISCV::SRA:
234 case RISCV::SRL:
235 // Shift amount operands only use log2(Xlen) bits.
236 if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
237 break;
238 return false;
239 case RISCV::SLLI:
240 // SLLI only uses the lower (XLen - ShAmt) bits.
241 if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
242 break;
243 return false;
244 case RISCV::ANDI:
// ANDI clears everything above the mask's top bit; otherwise fall back to
// the recursive bitwise-op check shared with AND/OR/XOR (RecCheck).
245 if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
246 (uint64_t)UserMI.getOperand(2).getImm()))
247 break;
248 goto RecCheck;
249 case RISCV::AND:
250 case RISCV::OR:
251 case RISCV::XOR:
// Bitwise ops pass demanded bits straight through: recurse on their users.
252 RecCheck:
253 if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
254 break;
255 return false;
256 case RISCV::SRLI: {
257 unsigned ShAmt = UserMI.getOperand(2).getImm();
258 // If we are shifting right by less than Bits, and users don't demand any
259 // bits that were shifted into [Bits-1:0], then we can consider this as an
260 // N-Bit user.
261 if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
262 break;
263 return false;
264 }
265 }
266 }
267
268 return true;
269}
270
// ComplexPattern handler for shift amounts: strips redundant zexts, masks,
// and +/- constant adjustments from the shift-amount register, exploiting
// that RISC-V shifts only read the low log2(ShiftWidth) bits of rs2.
// NOTE(review): the return-type line (InstructionSelector::ComplexRendererFns
// at "271") and the declaration "Register Reg;" (line "315") appear to be
// missing from this extraction -- confirm against upstream.
272RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
273 unsigned ShiftWidth) const {
274 if (!Root.isReg())
275 return std::nullopt;
276
277 using namespace llvm::MIPatternMatch;
278
279 Register ShAmtReg = Root.getReg();
280 // Peek through zext.
281 Register ZExtSrcReg;
282 if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
283 ShAmtReg = ZExtSrcReg;
284
285 APInt AndMask;
286 Register AndSrcReg;
287 // Try to combine the following pattern (applicable to other shift
288 // instructions as well as 32-bit ones):
289 //
290 // %4:gprb(s64) = G_AND %3, %2
291 // %5:gprb(s64) = G_LSHR %1, %4(s64)
292 //
293 // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
294 // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
295 // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
296 // then it can be eliminated. Given register rs1 or rs2 holding a constant
297 // (the and mask), there are two cases G_AND can be erased:
298 //
299 // 1. the lowest log2(XLEN) bits of the and mask are all set
300 // 2. the bits of the register being masked are already unset (zero set)
301 if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
302 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
303 if (ShMask.isSubsetOf(AndMask)) {
304 ShAmtReg = AndSrcReg;
305 } else {
306 // SimplifyDemandedBits may have optimized the mask so try restoring any
307 // bits that are known zero.
308 KnownBits Known = VT->getKnownBits(AndSrcReg);
309 if (ShMask.isSubsetOf(AndMask | Known.Zero))
310 ShAmtReg = AndSrcReg;
311 }
312 }
313
314 APInt Imm;
316 if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
317 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
318 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
319 // to avoid the ADD.
320 ShAmtReg = Reg;
321 } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
322 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
323 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
324 // to generate a NEG instead of a SUB of a constant.
325 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
326 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
327 return {{[=](MachineInstrBuilder &MIB) {
328 MachineIRBuilder(*MIB.getInstr())
329 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
330 MIB.addReg(ShAmtReg);
331 }}};
332 }
333 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
334 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
335 // to generate a NOT instead of a SUB of a constant.
336 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
337 return {{[=](MachineInstrBuilder &MIB) {
338 MachineIRBuilder(*MIB.getInstr())
339 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
340 .addImm(-1);
341 MIB.addReg(ShAmtReg);
342 }}};
343 }
344 }
345
// Default: use the (possibly simplified) shift-amount register directly.
346 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
347}
348
// ComplexPattern handler: matches a value already sign-extended from `Bits`
// bits, either via an explicit G_SEXT_INREG (which is then looked through) or
// because value tracking proves enough sign bits. Returns nullopt otherwise.
// NOTE(review): the return-type line (line "349") is missing from this
// extraction -- it is presumably InstructionSelector::ComplexRendererFns.
350RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
351 unsigned Bits) const {
352 if (!Root.isReg())
353 return std::nullopt;
354 Register RootReg = Root.getReg();
355 MachineInstr *RootDef = MRI->getVRegDef(RootReg);
356
// Fold away a G_SEXT_INREG of exactly `Bits` bits: use its input directly.
357 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
358 RootDef->getOperand(2).getImm() == Bits) {
359 return {
360 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
361 }
362
// Otherwise accept the root as-is if it already has >= (Size - Bits + 1)
// known sign bits.
363 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
364 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
365 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
366
367 return std::nullopt;
368}
369
// ComplexPattern handler: matches a value already zero-extended from `Bits`
// bits -- an explicit AND with the low-`Bits` mask, a G_ZEXT from a `Bits`-wide
// source, or a value whose high bits are provably zero. Returns nullopt
// otherwise.
// NOTE(review): the return-type line (line "369"/"370") is missing from this
// extraction -- presumably InstructionSelector::ComplexRendererFns.
371RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
372 unsigned Bits) const {
373 if (!Root.isReg())
374 return std::nullopt;
375 Register RootReg = Root.getReg();
376
// Fold (and x, (1 << Bits) - 1): the AND is the zero-extension itself.
377 Register RegX;
378 uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
379 if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
380 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
381 }
382
// Fold a G_ZEXT whose source is exactly `Bits` wide.
383 if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
384 MRI->getType(RegX).getScalarSizeInBits() == Bits)
385 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
386
// Accept the root unchanged when value tracking proves bits [Size-1:Bits]
// are zero.
387 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
388 if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
389 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
390
391 return std::nullopt;
392}
393
// ComplexPattern handler for the shifted operand of SHxADD: recognizes
// shift+mask combinations that can be rewritten as a single SRLI/SRLIW whose
// result feeds an SHXADD with shift amount `ShAmt`.
// NOTE(review): the return-type line ("394") and one line of the second lshr
// pattern (line "460", presumably the m_GLShr(m_OneNonDBGUse(m_GAnd(...)))
// matcher) are missing from this extraction -- confirm against upstream.
395RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
396 unsigned ShAmt) const {
397 using namespace llvm::MIPatternMatch;
398
399 if (!Root.isReg())
400 return std::nullopt;
401 Register RootReg = Root.getReg();
402
403 const unsigned XLen = STI.getXLen();
404 APInt Mask, C2;
405 Register RegY;
406 std::optional<bool> LeftShift;
407 // (and (shl y, c2), mask)
408 if (mi_match(RootReg, *MRI,
409 m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
410 LeftShift = true;
411 // (and (lshr y, c2), mask)
412 else if (mi_match(RootReg, *MRI,
413 m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
414 LeftShift = false;
415
416 if (LeftShift.has_value()) {
// Drop mask bits the inner shift already forces to zero, so the shifted-mask
// check below sees the effective mask.
417 if (*LeftShift)
418 Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
419 else
420 Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());
421
422 if (Mask.isShiftedMask()) {
423 unsigned Leading = XLen - Mask.getActiveBits();
424 unsigned Trailing = Mask.countr_zero();
425 // Given (and (shl y, c2), mask) in which mask has no leading zeros and
426 // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
427 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
428 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
429 return {{[=](MachineInstrBuilder &MIB) {
430 MachineIRBuilder(*MIB.getInstr())
431 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
432 .addImm(Trailing - C2.getLimitedValue());
433 MIB.addReg(DstReg);
434 }}};
435 }
436
437 // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
438 // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
439 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
440 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
441 return {{[=](MachineInstrBuilder &MIB) {
442 MachineIRBuilder(*MIB.getInstr())
443 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
444 .addImm(Leading + Trailing);
445 MIB.addReg(DstReg);
446 }}};
447 }
448 }
449 }
450
// Second family of patterns: the AND is inside the shift instead of outside.
451 LeftShift.reset();
452
453 // (shl (and y, mask), c2)
454 if (mi_match(RootReg, *MRI,
455 m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
456 m_ICst(C2))))
457 LeftShift = true;
458 // (lshr (and y, mask), c2)
459 else if (mi_match(RootReg, *MRI,
461 m_ICst(C2))))
462 LeftShift = false;
463
464 if (LeftShift.has_value() && Mask.isShiftedMask()) {
465 unsigned Leading = XLen - Mask.getActiveBits();
466 unsigned Trailing = Mask.countr_zero();
467
468 // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
469 // c3 trailing zeros. If c1 + c3 == ShAmt, we can emit SRLIW + SHXADD.
470 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
471 (Trailing + C2.getLimitedValue()) == ShAmt;
472 if (!Cond)
473 // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
474 // c3 trailing zeros. If c3 - c1 == ShAmt, we can emit SRLIW + SHXADD.
475 Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
476 (Trailing - C2.getLimitedValue()) == ShAmt;
477
478 if (Cond) {
479 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
480 return {{[=](MachineInstrBuilder &MIB) {
481 MachineIRBuilder(*MIB.getInstr())
482 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
483 .addImm(Trailing);
484 MIB.addReg(DstReg);
485 }}};
486 }
487 }
488
489 return std::nullopt;
490}
491
// ComplexPattern handler for the shifted operand of SHxADD.UW: recognizes
// (and (shl x, c2), mask) that can be rewritten as SLLI by (c2 - ShAmt)
// feeding an SHXADD_UW with shift amount ShAmt.
// NOTE(review): the return-type line ("492") and one matcher line ("508",
// presumably m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))), ...))
// are missing from this extraction -- confirm against upstream.
493RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
494 unsigned ShAmt) const {
495 using namespace llvm::MIPatternMatch;
496
497 if (!Root.isReg())
498 return std::nullopt;
499 Register RootReg = Root.getReg();
500
501 // Given (and (shl x, c2), mask) in which mask is a shifted mask with
502 // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
503 // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
504 APInt Mask, C2;
505 Register RegX;
506 if (mi_match(
507 RootReg, *MRI,
509 m_ICst(Mask))))) {
// Clear mask bits the shift already zeroed, then require a contiguous mask.
510 Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
511
512 if (Mask.isShiftedMask()) {
513 unsigned Leading = Mask.countl_zero();
514 unsigned Trailing = Mask.countr_zero();
515 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
516 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
517 return {{[=](MachineInstrBuilder &MIB) {
518 MachineIRBuilder(*MIB.getInstr())
519 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
520 .addImm(C2.getLimitedValue() - ShAmt);
521 MIB.addReg(DstReg);
522 }}};
523 }
524 }
525 }
526
527 return std::nullopt;
528}
529
// Renders a vector-length operand: all-ones constants become the VLMaxSentinel
// immediate, small (uimm5) constants become plain immediates, anything else is
// passed through as a register.
// NOTE(review): the return-type line ("530") is missing from this extraction
// -- presumably InstructionSelector::ComplexRendererFns.
531RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
532 assert(Root.isReg() && "Expected operand to be a Register");
533 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
534
535 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
536 auto C = RootDef->getOperand(1).getCImm();
537 if (C->getValue().isAllOnes())
538 // If the operand is a G_CONSTANT with value of all ones it is larger than
539 // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
540 // recognized specially by the vsetvli insertion pass.
541 return {{[=](MachineInstrBuilder &MIB) {
542 MIB.addImm(RISCV::VLMaxSentinel);
543 }}};
544
// Constants that fit in 5 bits can be encoded directly as an immediate AVL.
545 if (isUInt<5>(C->getZExtValue())) {
546 uint64_t ZExtC = C->getZExtValue();
547 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
548 }
549 }
// Fallback: keep the VL in a register.
550 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
551}
552
// ComplexPattern handler for reg+simm12 addressing: folds frame indices and
// base-plus-constant-offset pointers into a (base, imm) operand pair, falling
// back to (root, 0).
// NOTE(review): the return-type line ("553") is missing from this extraction
// -- presumably InstructionSelector::ComplexRendererFns.
554RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
555 if (!Root.isReg())
556 return std::nullopt;
557
558 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
// A bare frame index becomes (FI, 0).
559 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
560 return {{
561 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
562 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
563 }};
564 }
565
// (base + constant) with the constant in simm12 range becomes (base, imm);
// the base itself may again be a frame index.
566 if (isBaseWithConstantOffset(Root, *MRI)) {
567 MachineOperand &LHS = RootDef->getOperand(1);
568 MachineOperand &RHS = RootDef->getOperand(2);
569 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
570 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
571
572 int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
573 if (isInt<12>(RHSC)) {
574 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
575 return {{
576 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
577 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
578 }};
579
580 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
581 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
582 }
583 }
584
585 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
586 // the combiner?
587 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
589}
590
591/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
592/// CC Must be an ICMP Predicate.
593static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
594 switch (CC) {
595 default:
596 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
597 case CmpInst::Predicate::ICMP_EQ:
598 return RISCVCC::COND_EQ;
599 case CmpInst::Predicate::ICMP_NE:
600 return RISCVCC::COND_NE;
601 case CmpInst::Predicate::ICMP_ULT:
602 return RISCVCC::COND_LTU;
603 case CmpInst::Predicate::ICMP_SLT:
604 return RISCVCC::COND_LT;
605 case CmpInst::Predicate::ICMP_UGE:
606 return RISCVCC::COND_GEU;
607 case CmpInst::Predicate::ICMP_SGE:
608 return RISCVCC::COND_GE;
609 }
610}
611
// Derives branch operands (LHS, RHS, CC) from a condition register: either by
// folding a defining G_ICMP (with canonicalization) or by falling back to a
// "!= x0" test of the condition value itself.
// NOTE(review): the extraction is missing this function's opening lines
// (signature around "612"/"614", the "CmpInst::Predicate Pred;" declaration at
// "616", and the matchConstant guard around "627") -- confirm upstream.
613 Register &LHS, Register &RHS,
615 // Try to fold an ICmp. If that fails, use a NE compare with X0.
617 if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
618 LHS = CondReg;
619 RHS = RISCV::X0;
620 CC = RISCVCC::COND_NE;
621 return;
622 }
623
624 // We found an ICmp, do some canonicalization.
625
626 // Adjust comparisons to use comparison with 0 if possible.
628 switch (Pred) {
629 case CmpInst::Predicate::ICMP_SGT:
630 // Convert X > -1 to X >= 0
631 if (*Constant == -1) {
632 CC = RISCVCC::COND_GE;
633 RHS = RISCV::X0;
634 return;
635 }
636 break;
637 case CmpInst::Predicate::ICMP_SLT:
638 // Convert X < 1 to 0 >= X
639 if (*Constant == 1) {
640 CC = RISCVCC::COND_GE;
641 RHS = LHS;
642 LHS = RISCV::X0;
643 return;
644 }
645 break;
646 default:
647 break;
648 }
649 }
650
// Predicates without a direct branch encoding are handled by swapping the
// operands and reversing the comparison direction.
651 switch (Pred) {
652 default:
653 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
654 case CmpInst::Predicate::ICMP_EQ:
655 case CmpInst::Predicate::ICMP_NE:
656 case CmpInst::Predicate::ICMP_ULT:
657 case CmpInst::Predicate::ICMP_SLT:
658 case CmpInst::Predicate::ICMP_UGE:
659 case CmpInst::Predicate::ICMP_SGE:
660 // These CCs are supported directly by RISC-V branches.
661 break;
662 case CmpInst::Predicate::ICMP_SGT:
663 case CmpInst::Predicate::ICMP_SLE:
664 case CmpInst::Predicate::ICMP_UGT:
665 case CmpInst::Predicate::ICMP_ULE:
666 // These CCs are not supported directly by RISC-V branches, but changing the
667 // direction of the CC and swapping LHS and RHS are.
668 Pred = CmpInst::getSwappedPredicate(Pred);
669 std::swap(LHS, RHS);
670 break;
671 }
672
673 CC = getRISCVCCFromICmp(Pred);
674}
675
// Top-level selection entry point: pre-lowers pointer ops, handles PHIs and
// copies, tries the tblgen-generated matcher, then falls back to the
// hand-written cases below. Returns true on successful selection.
// NOTE(review): several lines are missing from this extraction (the FCVT
// builder binding around "761", the CC/LHS/RHS declarations at "801"-"802",
// and line "813" after G_BRINDIRECT, presumably the constrain/return
// statement) -- confirm against upstream.
676bool RISCVInstructionSelector::select(MachineInstr &MI) {
677 MachineIRBuilder MIB(MI);
678
679 preISelLower(MI, MIB);
680 const unsigned Opc = MI.getOpcode();
681
// Already-selected or target instructions: only PHIs and copies need work.
682 if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
683 if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
684 const Register DefReg = MI.getOperand(0).getReg();
685 const LLT DefTy = MRI->getType(DefReg);
686
687 const RegClassOrRegBank &RegClassOrBank =
688 MRI->getRegClassOrRegBank(DefReg);
689
690 const TargetRegisterClass *DefRC =
691 dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
692 if (!DefRC) {
693 if (!DefTy.isValid()) {
694 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
695 return false;
696 }
697
698 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
699 DefRC = getRegClassForTypeOnBank(DefTy, RB);
700 if (!DefRC) {
701 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
702 return false;
703 }
704 }
705
706 MI.setDesc(TII.get(TargetOpcode::PHI));
707 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
708 }
709
710 // Certain non-generic instructions also need some special handling.
711 if (MI.isCopy())
712 return selectCopy(MI);
713
714 return true;
715 }
716
// First try the tblgen-generated patterns.
717 if (selectImpl(MI, *CoverageInfo))
718 return true;
719
720 switch (Opc) {
721 case TargetOpcode::G_ANYEXT:
722 case TargetOpcode::G_PTRTOINT:
723 case TargetOpcode::G_INTTOPTR:
724 case TargetOpcode::G_TRUNC:
725 case TargetOpcode::G_FREEZE:
726 return selectCopy(MI);
727 case TargetOpcode::G_CONSTANT: {
728 Register DstReg = MI.getOperand(0).getReg();
729 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
730
731 if (!materializeImm(DstReg, Imm, MIB))
732 return false;
733
734 MI.eraseFromParent();
735 return true;
736 }
737 case TargetOpcode::G_FCONSTANT: {
738 // TODO: Use constant pool for complex constants.
739 Register DstReg = MI.getOperand(0).getReg();
740 const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
741 APInt Imm = FPimm.bitcastToAPInt();
742 unsigned Size = MRI->getType(DstReg).getSizeInBits();
// FP constants that fit a single GPR: materialize the bit pattern and move it
// into the FPR with the size-matching FMV.
743 if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
744 Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
745 if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
746 return false;
747
748 unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
749 : Size == 32 ? RISCV::FMV_W_X
750 : RISCV::FMV_H_X;
751 auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
752 if (!FMV.constrainAllUses(TII, TRI, RBI))
753 return false;
754 } else {
755 // s64 on rv32
756 assert(Size == 64 && !Subtarget->is64Bit() &&
757 "Unexpected size or subtarget");
758
759 if (Imm.isNonNegative() && Imm.isZero()) {
760 // Optimize +0.0 to use fcvt.d.w
762 MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
763 .addImm(RISCVFPRndMode::RNE);
764 if (!FCVT.constrainAllUses(TII, TRI, RBI))
765 return false;
766
767 MI.eraseFromParent();
768 return true;
769 }
770
771 // Split into two pieces and build through the stack.
772 Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
773 Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
774 if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
775 MIB))
776 return false;
777 if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
778 return false;
779 MachineInstrBuilder PairF64 = MIB.buildInstr(
780 RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
781 if (!PairF64.constrainAllUses(TII, TRI, RBI))
782 return false;
783 }
784
785 MI.eraseFromParent();
786 return true;
787 }
788 case TargetOpcode::G_GLOBAL_VALUE: {
789 auto *GV = MI.getOperand(1).getGlobal();
790 if (GV->isThreadLocal()) {
791 // TODO: implement this case.
792 return false;
793 }
794
795 return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
796 }
797 case TargetOpcode::G_JUMP_TABLE:
798 case TargetOpcode::G_CONSTANT_POOL:
799 return selectAddr(MI, MIB, MRI);
800 case TargetOpcode::G_BRCOND: {
803 getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);
804
805 auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
806 .addMBB(MI.getOperand(1).getMBB());
807 MI.eraseFromParent();
808 return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
809 }
810 case TargetOpcode::G_BRINDIRECT:
811 MI.setDesc(TII.get(RISCV::PseudoBRIND));
812 MI.addOperand(MachineOperand::CreateImm(0));
814 case TargetOpcode::G_SELECT:
815 return selectSelect(MI, MIB);
816 case TargetOpcode::G_FCMP:
817 return selectFPCompare(MI, MIB);
818 case TargetOpcode::G_FENCE: {
819 AtomicOrdering FenceOrdering =
820 static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
821 SyncScope::ID FenceSSID =
822 static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
823 emitFence(FenceOrdering, FenceSSID, MIB);
824 MI.eraseFromParent();
825 return true;
826 }
827 case TargetOpcode::G_IMPLICIT_DEF:
828 return selectImplicitDef(MI, MIB);
829 case TargetOpcode::G_UNMERGE_VALUES:
830 return selectUnmergeValues(MI, MIB);
831 default:
832 return false;
833 }
834}
835
836bool RISCVInstructionSelector::selectUnmergeValues(
837 MachineInstr &MI, MachineIRBuilder &MIB) const {
838 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
839
840 if (!Subtarget->hasStdExtZfa())
841 return false;
842
843 // Split F64 Src into two s32 parts
844 if (MI.getNumOperands() != 3)
845 return false;
846 Register Src = MI.getOperand(2).getReg();
847 Register Lo = MI.getOperand(0).getReg();
848 Register Hi = MI.getOperand(1).getReg();
849 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
850 return false;
851
852 MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
853 if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
854 return false;
855
856 MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
857 if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
858 return false;
859
860 MI.eraseFromParent();
861 return true;
862}
863
864bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
865 MachineIRBuilder &MIB) {
866 Register PtrReg = Op.getReg();
867 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
868
869 const LLT sXLen = LLT::scalar(STI.getXLen());
870 auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
871 MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
872 Op.setReg(PtrToInt.getReg(0));
873 return select(*PtrToInt);
874}
875
876void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
877 MachineIRBuilder &MIB) {
878 switch (MI.getOpcode()) {
879 case TargetOpcode::G_PTR_ADD: {
880 Register DstReg = MI.getOperand(0).getReg();
881 const LLT sXLen = LLT::scalar(STI.getXLen());
882
883 replacePtrWithInt(MI.getOperand(1), MIB);
884 MI.setDesc(TII.get(TargetOpcode::G_ADD));
885 MRI->setType(DstReg, sXLen);
886 break;
887 }
888 case TargetOpcode::G_PTRMASK: {
889 Register DstReg = MI.getOperand(0).getReg();
890 const LLT sXLen = LLT::scalar(STI.getXLen());
891 replacePtrWithInt(MI.getOperand(1), MIB);
892 MI.setDesc(TII.get(TargetOpcode::G_AND));
893 MRI->setType(DstReg, sXLen);
894 break;
895 }
896 }
897}
898
899void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
900 const MachineInstr &MI,
901 int OpIdx) const {
902 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
903 "Expected G_CONSTANT");
904 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
905 MIB.addImm(-CstVal);
906}
907
908void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
909 const MachineInstr &MI,
910 int OpIdx) const {
911 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
912 "Expected G_CONSTANT");
913 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
914 MIB.addImm(STI.getXLen() - CstVal);
915}
916
917void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
918 const MachineInstr &MI,
919 int OpIdx) const {
920 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
921 "Expected G_CONSTANT");
922 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
923 MIB.addImm(32 - CstVal);
924}
925
926void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
927 const MachineInstr &MI,
928 int OpIdx) const {
929 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
930 "Expected G_CONSTANT");
931 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
932 MIB.addImm(CstVal + 1);
933}
934
935void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
936 const MachineInstr &MI,
937 int OpIdx) const {
938 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
939 "Expected G_FRAME_INDEX");
940 MIB.add(MI.getOperand(1));
941}
942
// Custom renderer for a G_CONSTANT's trailing-zero count.
// NOTE(review): the line that actually emits the immediate (line "949",
// presumably MIB.addImm(llvm::countr_zero(C));) is missing from this
// extraction -- confirm against the upstream file.
943void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
944 const MachineInstr &MI,
945 int OpIdx) const {
946 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
947 "Expected G_CONSTANT");
948 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
950}
951
952void RISCVInstructionSelector::renderXLenSubTrailingOnes(
953 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
954 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
955 "Expected G_CONSTANT");
956 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
957 MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
958}
959
960void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
961 const MachineInstr &MI,
962 int OpIdx) const {
963 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
964 "Expected G_CONSTANT");
965 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
966 int64_t Adj = Imm < 0 ? -2048 : 2047;
967 MIB.addImm(Imm - Adj);
968}
969
970void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
971 const MachineInstr &MI,
972 int OpIdx) const {
973 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
974 "Expected G_CONSTANT");
975 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
976 MIB.addImm(Imm);
977}
978
979const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
980 LLT Ty, const RegisterBank &RB) const {
981 if (RB.getID() == RISCV::GPRBRegBankID) {
982 if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
983 return &RISCV::GPRRegClass;
984 }
985
986 if (RB.getID() == RISCV::FPRBRegBankID) {
987 if (Ty.getSizeInBits() == 16)
988 return &RISCV::FPR16RegClass;
989 if (Ty.getSizeInBits() == 32)
990 return &RISCV::FPR32RegClass;
991 if (Ty.getSizeInBits() == 64)
992 return &RISCV::FPR64RegClass;
993 }
994
995 if (RB.getID() == RISCV::VRBRegBankID) {
996 if (Ty.getSizeInBits().getKnownMinValue() <= 64)
997 return &RISCV::VRRegClass;
998
999 if (Ty.getSizeInBits().getKnownMinValue() == 128)
1000 return &RISCV::VRM2RegClass;
1001
1002 if (Ty.getSizeInBits().getKnownMinValue() == 256)
1003 return &RISCV::VRM4RegClass;
1004
1005 if (Ty.getSizeInBits().getKnownMinValue() == 512)
1006 return &RISCV::VRM8RegClass;
1007 }
1008
1009 return nullptr;
1010}
1011
1012bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1013 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1014}
1015
1016bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1017 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1018}
1019
1020bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1021 Register DstReg = MI.getOperand(0).getReg();
1022
1023 if (DstReg.isPhysical())
1024 return true;
1025
1026 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1027 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1028 assert(DstRC &&
1029 "Register class not available for LLT, register bank combination");
1030
1031 // No need to constrain SrcReg. It will get constrained when
1032 // we hit another of its uses or its defs.
1033 // Copies do not have constraints.
1034 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1035 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1036 << " operand\n");
1037 return false;
1038 }
1039
1040 MI.setDesc(TII.get(RISCV::COPY));
1041 return true;
1042}
1043
1044bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
1045 MachineIRBuilder &MIB) const {
1046 assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1047
1048 const Register DstReg = MI.getOperand(0).getReg();
1049 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1050 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1051
1052 assert(DstRC &&
1053 "Register class not available for LLT, register bank combination");
1054
1055 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1056 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1057 << " operand\n");
1058 }
1059 MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
1060 return true;
1061}
1062
1063bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
1064 MachineIRBuilder &MIB) const {
1065 if (Imm == 0) {
1066 MIB.buildCopy(DstReg, Register(RISCV::X0));
1067 RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
1068 return true;
1069 }
1070
1072 unsigned NumInsts = Seq.size();
1073 Register SrcReg = RISCV::X0;
1074
1075 for (unsigned i = 0; i < NumInsts; i++) {
1076 Register TmpReg = i < NumInsts - 1
1077 ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
1078 : DstReg;
1079 const RISCVMatInt::Inst &I = Seq[i];
1081
1082 switch (I.getOpndKind()) {
1083 case RISCVMatInt::Imm:
1084 // clang-format off
1085 Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
1086 .addImm(I.getImm());
1087 // clang-format on
1088 break;
1089 case RISCVMatInt::RegX0:
1090 Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
1091 {SrcReg, Register(RISCV::X0)});
1092 break;
1094 Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
1095 break;
1097 Result =
1098 MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
1099 break;
1100 }
1101
1102 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1103 return false;
1104
1105 SrcReg = TmpReg;
1106 }
1107
1108 return true;
1109}
1110
1111bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
1112 MachineIRBuilder &MIB, bool IsLocal,
1113 bool IsExternWeak) const {
1114 assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1115 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1116 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1117 "Unexpected opcode");
1118
1119 const MachineOperand &DispMO = MI.getOperand(1);
1120
1121 Register DefReg = MI.getOperand(0).getReg();
1122 const LLT DefTy = MRI->getType(DefReg);
1123
1124 // When HWASAN is used and tagging of global variables is enabled
1125 // they should be accessed via the GOT, since the tagged address of a global
1126 // is incompatible with existing code models. This also applies to non-pic
1127 // mode.
1128 if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
1129 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1130 // Use PC-relative addressing to access the symbol. This generates the
1131 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1132 // %pcrel_lo(auipc)).
1133 MI.setDesc(TII.get(RISCV::PseudoLLA));
1135 }
1136
1137 // Use PC-relative addressing to access the GOT for this symbol, then
1138 // load the address from the GOT. This generates the pattern (PseudoLGA
1139 // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
1140 // %pcrel_lo(auipc))).
1141 MachineFunction &MF = *MI.getParent()->getParent();
1146 DefTy, Align(DefTy.getSizeInBits() / 8));
1147
1148 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1149 .addDisp(DispMO, 0)
1151
1152 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1153 return false;
1154
1155 MI.eraseFromParent();
1156 return true;
1157 }
1158
1159 switch (TM.getCodeModel()) {
1160 default: {
1161 reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
1162 getName(), "Unsupported code model for lowering", MI);
1163 return false;
1164 }
1165 case CodeModel::Small: {
1166 // Must lie within a single 2 GiB address range and must lie between
1167 // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
1168 // (lui %hi(sym)) %lo(sym)).
1169 Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1170 MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
1171 .addDisp(DispMO, 0, RISCVII::MO_HI);
1172
1173 if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
1174 return false;
1175
1176 auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
1177 .addDisp(DispMO, 0, RISCVII::MO_LO);
1178
1179 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1180 return false;
1181
1182 MI.eraseFromParent();
1183 return true;
1184 }
1185 case CodeModel::Medium:
1186 // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1187 // relocation needs to reference a label that points to the auipc
1188 // instruction itself, not the global. This cannot be done inside the
1189 // instruction selector.
1190 if (IsExternWeak) {
1191 // An extern weak symbol may be undefined, i.e. have value 0, which may
1192 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1193 // symbol. This generates the pattern (PseudoLGA sym), which expands to
1194 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1195 MachineFunction &MF = *MI.getParent()->getParent();
1200 DefTy, Align(DefTy.getSizeInBits() / 8));
1201
1202 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1203 .addDisp(DispMO, 0)
1205
1206 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1207 return false;
1208
1209 MI.eraseFromParent();
1210 return true;
1211 }
1212
1213 // Generate a sequence for accessing addresses within any 2GiB range
1214 // within the address space. This generates the pattern (PseudoLLA sym),
1215 // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1216 MI.setDesc(TII.get(RISCV::PseudoLLA));
1218 }
1219
1220 return false;
1221}
1222
1223bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1224 MachineIRBuilder &MIB) const {
1225 auto &SelectMI = cast<GSelect>(MI);
1226
1227 Register LHS, RHS;
1229 getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
1230
1231 Register DstReg = SelectMI.getReg(0);
1232
1233 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1234 if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1235 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1236 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1237 : RISCV::Select_FPR64_Using_CC_GPR;
1238 }
1239
1241 .addDef(DstReg)
1242 .addReg(LHS)
1243 .addReg(RHS)
1244 .addImm(CC)
1245 .addReg(SelectMI.getTrueReg())
1246 .addReg(SelectMI.getFalseReg());
1247 MI.eraseFromParent();
1248 return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
1249}
1250
1251// Convert an FCMP predicate to one of the supported F or D instructions.
1252static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1253 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1254 switch (Pred) {
1255 default:
1256 llvm_unreachable("Unsupported predicate");
1257 case CmpInst::FCMP_OLT:
1258 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1259 case CmpInst::FCMP_OLE:
1260 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1261 case CmpInst::FCMP_OEQ:
1262 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1263 }
1264}
1265
1266// Try legalizing an FCMP by swapping or inverting the predicate to one that
1267// is supported.
1269 CmpInst::Predicate &Pred, bool &NeedInvert) {
1270 auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1271 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1272 Pred == CmpInst::FCMP_OEQ;
1273 };
1274
1275 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1276
1278 if (isLegalFCmpPredicate(InvPred)) {
1279 Pred = InvPred;
1280 std::swap(LHS, RHS);
1281 return true;
1282 }
1283
1284 InvPred = CmpInst::getInversePredicate(Pred);
1285 NeedInvert = true;
1286 if (isLegalFCmpPredicate(InvPred)) {
1287 Pred = InvPred;
1288 return true;
1289 }
1290 InvPred = CmpInst::getSwappedPredicate(InvPred);
1291 if (isLegalFCmpPredicate(InvPred)) {
1292 Pred = InvPred;
1293 std::swap(LHS, RHS);
1294 return true;
1295 }
1296
1297 return false;
1298}
1299
1300// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1301// the result in DstReg.
1302// FIXME: Maybe we should expand this earlier.
1303bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1304 MachineIRBuilder &MIB) const {
1305 auto &CmpMI = cast<GFCmp>(MI);
1306 CmpInst::Predicate Pred = CmpMI.getCond();
1307
1308 Register DstReg = CmpMI.getReg(0);
1309 Register LHS = CmpMI.getLHSReg();
1310 Register RHS = CmpMI.getRHSReg();
1311
1312 unsigned Size = MRI->getType(LHS).getSizeInBits();
1313 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1314
1315 Register TmpReg = DstReg;
1316
1317 bool NeedInvert = false;
1318 // First try swapping operands or inverting.
1319 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1320 if (NeedInvert)
1321 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1322 auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
1323 if (!Cmp.constrainAllUses(TII, TRI, RBI))
1324 return false;
1325 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1326 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
1327 NeedInvert = Pred == CmpInst::FCMP_UEQ;
1329 {&RISCV::GPRRegClass}, {LHS, RHS});
1330 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1331 return false;
1333 {&RISCV::GPRRegClass}, {RHS, LHS});
1334 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1335 return false;
1336 if (NeedInvert)
1337 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1338 auto Or =
1339 MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1340 if (!Or.constrainAllUses(TII, TRI, RBI))
1341 return false;
1342 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1343 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
1344 // FIXME: If LHS and RHS are the same we can use a single FEQ.
1345 NeedInvert = Pred == CmpInst::FCMP_UNO;
1347 {&RISCV::GPRRegClass}, {LHS, LHS});
1348 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1349 return false;
1351 {&RISCV::GPRRegClass}, {RHS, RHS});
1352 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1353 return false;
1354 if (NeedInvert)
1355 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1356 auto And =
1357 MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1358 if (!And.constrainAllUses(TII, TRI, RBI))
1359 return false;
1360 } else
1361 llvm_unreachable("Unhandled predicate");
1362
1363 // Emit an XORI to invert the result if needed.
1364 if (NeedInvert) {
1365 auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1366 if (!Xor.constrainAllUses(TII, TRI, RBI))
1367 return false;
1368 }
1369
1370 MI.eraseFromParent();
1371 return true;
1372}
1373
1374void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1375 SyncScope::ID FenceSSID,
1376 MachineIRBuilder &MIB) const {
1377 if (STI.hasStdExtZtso()) {
1378 // The only fence that needs an instruction is a sequentially-consistent
1379 // cross-thread fence.
1380 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1381 FenceSSID == SyncScope::System) {
1382 // fence rw, rw
1383 MIB.buildInstr(RISCV::FENCE, {}, {})
1386 return;
1387 }
1388
1389 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
1390 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1391 return;
1392 }
1393
1394 // singlethread fences only synchronize with signal handlers on the same
1395 // thread and thus only need to preserve instruction order, not actually
1396 // enforce memory ordering.
1397 if (FenceSSID == SyncScope::SingleThread) {
1398 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1399 return;
1400 }
1401
1402 // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
1403 // Manual: Volume I.
1404 unsigned Pred, Succ;
1405 switch (FenceOrdering) {
1406 default:
1407 llvm_unreachable("Unexpected ordering");
1408 case AtomicOrdering::AcquireRelease:
1409 // fence acq_rel -> fence.tso
1410 MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
1411 return;
1412 case AtomicOrdering::Acquire:
1413 // fence acquire -> fence r, rw
1414 Pred = RISCVFenceField::R;
1416 break;
1417 case AtomicOrdering::Release:
1418 // fence release -> fence rw, w
1420 Succ = RISCVFenceField::W;
1421 break;
1422 case AtomicOrdering::SequentiallyConsistent:
1423 // fence seq_cst -> fence rw, rw
1426 break;
1427 }
1428 MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
1429}
1430
1431namespace llvm {
1434 const RISCVSubtarget &Subtarget,
1435 const RISCVRegisterBankInfo &RBI) {
1436 return new RISCVInstructionSelector(TM, Subtarget, RBI);
1437}
1438} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
uint64_t Size
Provides analysis for querying information about KnownBits during GISel passes.
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
#define I(x, y, z)
Definition: MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
MachineInstr unsigned OpIdx
static StringRef getName(Value *V)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
Value * RHS
Value * LHS
support::ulittle16_t & Lo
Definition: aarch32.cpp:205
support::ulittle16_t & Hi
Definition: aarch32.cpp:204
APInt bitcastToAPInt() const
Definition: APFloat.h:1353
Class for arbitrary precision integers.
Definition: APInt.h:78
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1488
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1111
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:475
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition: APInt.h:286
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:681
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:684
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:686
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:689
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:685
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:687
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:688
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:791
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:169
This is an important base class in LLVM.
Definition: Constant.h:43
This class represents an Operation in the Expression.
std::optional< SmallVector< std::function< void(MachineInstrBuilder &)>, 4 > > ComplexRendererFns
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
virtual bool select(MachineInstr &I)=0
Select the (possibly generic) instruction I to only use target-specific opcodes.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:43
constexpr bool isValid() const
Definition: LowLevelType.h:146
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:191
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:72
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:587
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:359
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:773
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:595
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Analysis providing profile information.
This class provides the information for the target register banks.
unsigned getXLen() const
This class implements the register bank concept.
Definition: RegisterBank.h:29
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:46
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:78
size_t size() const
Definition: SmallVector.h:79
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
unsigned getID() const
Return the register class ID number.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
operand_type_match m_Reg()
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
SpecificConstantMatch m_SpecificICst(APInt RequestedValue)
Matches a constant equal to RequestedValue.
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition: LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:58
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition: bit.h:260
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:155
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition: Utils.cpp:314
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:157
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:336
LLVM_ABI void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:259
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Xor
Bitwise or logical XOR of integers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define MORE()
Definition: regcomp.c:246
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Matching combinators.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.