//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelValueTracking *VT,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

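  // Bounds the recursive use-walk performed by hasAllNBitUsers below.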
  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  bool hasAllHUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 16);
  }
  bool hasAllWUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 32);
  }

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts. It may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
                        int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;
  void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
                                 const MachineInstr &MI, int OpIdx) const;

  void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;
  void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name of
  // Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// Mimics optimizations in ISel and RISCVOptWInst Pass
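// For example, if the only user of a G_ADD result is an ADDW, only the low 32
// bits of the add are ever observed, so hasAllWUsers returns true for it.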
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(Xlen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    RecCheck:
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as an
      // N-Bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to other shift
  // instructions as well as 32-bit ones):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
  // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
  // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
  // then it can be eliminated. Given register rs1 or rs2 holding a constant
  // (the and mask), there are two cases G_AND can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the and mask are all set
  // 2. the bits of the register being masked are already unset (zero set)
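  //
  // For example, with XLEN == 64, (G_LSHR %x, (G_AND %y, 63)) can be selected
  // as a plain SRL %x, %y: the mask 63 already covers the six bits SRL reads.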
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
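      // For example, with ShAmt == 3: ((y << 1) & ~7) == ((y >> 2) << 3), so
      // SRLI y, 2 can feed a SH3ADD.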
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
  // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
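  // For example, with ShAmt == 2 and mask == 0x3FFFFFFF0 (so c2 == 4):
  // ((x << 4) & 0x3FFFFFFF0) == (zext32(x << 2) << 2), i.e. SLLI x, 2
  // followed by SH2ADD_UW.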
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with value of all ones it is larger than
      // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
      // recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
/// CC must be an ICMP predicate.
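/// Predicates without direct branch support never reach this switch; they are
/// canonicalized away in getOperandsForBranch beforehand.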
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalization.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but changing the
    // direction of the CC and swapping LHS and RHS are.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
/// \p GenericOpc, appropriate for the GPR register bank and of memory access
/// size \p OpSize.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  }
}

/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
/// \p GenericOpc, appropriate for the GPR register bank and of memory access
/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    // Prefer unsigned due to no c.lb in Zcb.
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  }

  return GenericOpc;
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();

    if (SrcTy.isVector())
      return false; // Should be handled by imported patterns.

    assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // Use addiw SrcReg, 0 (sext.w) for i32.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use sext.h/zext.h for i16 with Zbb.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use pack(w) SrcReg, X0 for i16 zext with Zbkb.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Fall back to shift pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
          return false;
      }

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        MachineInstrBuilder FCVT =
            MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
                .addImm(RISCVFPRndMode::RNE);
        if (!FCVT.constrainAllUses(TII, TRI, RBI))
          return false;

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
    // Check that the pointer register is valid.
    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
#endif

    // Can only handle AddressSpace 0.
    if (PtrTy.getAddressSpace() != 0)
      return false;

    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();

    if (isStrongerThanMonotonic(Order)) {
      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something. Create a new instruction and return it.
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    if (isa<GStore>(MI))
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

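  // RISCVMatInt decomposes the constant into a short LUI/ADDI(W)/SLLI/...
  // sequence; e.g. 0x12345678 typically becomes LUI 0x12345 + ADDIW 0x678.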
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(*MF, *TPC, *MORE, getName(),
                       "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
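// For example, (fcmp ogt x, y) becomes (fcmp olt y, x) by swapping, while
// (fcmp uge x, y) becomes (fcmp olt x, y) with NeedInvert set, which the
// caller undoes with an XORI of 1.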
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Provides analysis for querying information about KnownBits during GISel passes.
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
#define I(x, y, z)
Definition MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1353
bool isPosZero() const
Definition APFloat.h:1460
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1111
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:475
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:286
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
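getSwappedPredicate and getInversePredicate are the workhorses behind legalizeFCmpPredicate above. A minimal sketch of the swap-to-canonicalize idea, using a hypothetical helper:

// Turn greater-than forms into less-than forms by exchanging operands.
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

CmpInst::Predicate canonicalizeToLT(CmpInst::Predicate Pred, bool &SwapOps) {
  SwapOps = (Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE);
  // getSwappedPredicate maps OGT->OLT and OGE->OLE (EQ stays EQ, etc.).
  return SwapOps ? CmpInst::getSwappedPredicate(Pred) : Pred;
}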
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the type of this constant.
Definition Constants.h:169
This is an important base class in LLVM.
Definition Constant.h:43
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
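These LLT queries are what the selector's type dispatch is built from. A standalone sketch (the helper name is hypothetical):

// Pointers report a size too, so a "scalar of XLen bits" test must
// exclude them explicitly.
#include "llvm/CodeGenTypes/LowLevelType.h"
using namespace llvm;

bool isXLenScalar(LLT Ty, unsigned XLen) {
  return Ty.isValid() && !Ty.isVector() && !Ty.isPointer() &&
         Ty.getSizeInBits() == XLen;
}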
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
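The builder chain above is the idiom selection code uses to emit target instructions. A minimal sketch assuming the RISCV::ADDI opcode (from the generated RISCVGenInstrInfo tables) and a caller-provided builder; the helper name is hypothetical:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

MachineInstrBuilder buildAddI(MachineIRBuilder &MIB, Register Dst,
                              Register Src, int64_t Imm) {
  return MIB.buildInstr(RISCV::ADDI)
      .addDef(Dst)   // result register definition
      .addReg(Src)   // register use
      .addImm(Imm);  // signed 12-bit immediate
}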
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Analysis providing profile information.
This class provides the information for the target register banks.
unsigned getXLen() const
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
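constrainGenericRegister is how a generic virtual register gets pinned to a concrete class once its bank is known. A sketch in the spirit of selectCopy-style code, assuming RISCV::GPRRegClass; the helper is hypothetical:

#include "llvm/CodeGen/RegisterBankInfo.h"
using namespace llvm;

bool constrainToGPR(Register Reg, MachineRegisterInfo &MRI) {
  // Returns null when Reg already carries an incompatible class.
  return RegisterBankInfo::constrainGenericRegister(
             Reg, RISCV::GPRRegClass, MRI) != nullptr;
}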
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
operand_type_match m_Reg()
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
SpecificConstantMatch m_SpecificICst(APInt RequestedValue)
Matches a constant equal to RequestedValue.
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
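The combinators above compose into declarative patterns over SSA machine IR. A minimal sketch recognizing (G_ADD x, G_CONSTANT c) and binding both pieces; the helper is hypothetical:

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
using namespace llvm;
using namespace MIPatternMatch;

bool matchAddOfConstant(Register Root, const MachineRegisterInfo &MRI,
                        Register &X, APInt &C) {
  // m_GAdd is commutative, so the constant may sit on either side.
  return mi_match(Root, MRI, m_GAdd(m_Reg(X), m_ICst(C)));
}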
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
Definition RISCVMatInt.h:43
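generateInstSeq computes the LUI/ADDI/SLLI/... chain that materializes an arbitrary XLen immediate; materializeImm walks the result. A hedged sketch of that iteration, with Val and STI assumed from context and the loop body left as comments since Inst's accessors vary by version:

RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
for (const RISCVMatInt::Inst &Step : Seq) {
  (void)Step;
  // Each step is one instruction; real code switches on the operand
  // kind to choose between reg-reg and reg-imm forms and threads the
  // previous step's result register into the next.
}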
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
This is an optimization pass for GlobalISel generic memory operations.
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
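isInt is the standard guard for RISC-V's 12-bit signed immediate fields. A one-line sketch (helper names hypothetical):

#include "llvm/Support/MathExtras.h"
using namespace llvm;

bool fitsSimm12(int64_t Imm) { return isInt<12>(Imm); }  // ADDI/load offsets
bool fitsUimm5(uint64_t Imm) { return isUInt<5>(Imm); }  // RV32 shift amounts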
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:279
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class.
Definition Utils.cpp:155
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:289
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT that fits in int64_t, returns it.
Definition Utils.cpp:314
int countr_zero(T Val)
Count the number of 0s from the least significant bit upward, stopping at the first 1.
Definition bit.h:186
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:342
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:259
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:103
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:86
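maskTrailingOnes pairs with countr_one when recognizing masks of N trailing ones, the shape that AND-to-shift rewrites look for. A standalone sketch (hypothetical helper):

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

bool isLowBitMask(uint64_t Mask, unsigned &Len) {
  Len = countr_one(Mask);  // length of the trailing run of 1s
  // Exact equality rules out masks with stray bits above the run.
  return Mask != 0 && Mask == maskTrailingOnes<uint64_t>(Len);
}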
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define MORE()
Definition regcomp.c:246
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.