LLVM 22.0.0git
FastISel.cpp
Go to the documentation of this file.
1//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the FastISel class.
10//
11// "Fast" instruction selection is designed to emit very poor code quickly.
12// Also, it is not designed to be able to do much lowering, so most illegal
13// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
14// also not intended to be able to do much optimization, except in a few cases
15// where doing optimizations reduces overall compile time. For example, folding
16// constants into immediate fields is often done, because it's cheap and it
17// reduces the number of instructions later phases have to examine.
18//
19// "Fast" instruction selection is able to fail gracefully and transfer
20// control to the SelectionDAG selector for operations that it doesn't
21// support. In many cases, this allows us to avoid duplicating a lot of
22// the complicated lowering logic that SelectionDAG currently has.
23//
24// The intended use for "fast" instruction selection is "-O0" mode
25// compilation, where the quality of the generated code is irrelevant when
26// weighed against the speed at which the code can be generated. Also,
27// at -O0, the LLVM optimizers are not running, and this makes the
28// compile time of codegen a much higher portion of the overall compile
29// time. Despite its limitations, "fast" instruction selection is able to
30// handle enough code on its own to provide noticeable overall speedups
31// in -O0 compiles.
32//
33// Basic operations are supported in a target-independent way, by reading
34// the same instruction descriptions that the SelectionDAG selector reads,
35// and identifying simple arithmetic operations that can be directly selected
36// from simple operators. More complicated operations currently require
37// target-specific code.
38//
39//===----------------------------------------------------------------------===//
40
42#include "llvm/ADT/APFloat.h"
43#include "llvm/ADT/APSInt.h"
44#include "llvm/ADT/DenseMap.h"
48#include "llvm/ADT/Statistic.h"
68#include "llvm/IR/Argument.h"
69#include "llvm/IR/Attributes.h"
70#include "llvm/IR/BasicBlock.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
74#include "llvm/IR/DataLayout.h"
75#include "llvm/IR/DebugLoc.h"
78#include "llvm/IR/Function.h"
80#include "llvm/IR/GlobalValue.h"
81#include "llvm/IR/InlineAsm.h"
82#include "llvm/IR/InstrTypes.h"
83#include "llvm/IR/Instruction.h"
86#include "llvm/IR/LLVMContext.h"
87#include "llvm/IR/Mangler.h"
88#include "llvm/IR/Metadata.h"
89#include "llvm/IR/Module.h"
90#include "llvm/IR/Operator.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/User.h"
94#include "llvm/IR/Value.h"
95#include "llvm/MC/MCContext.h"
96#include "llvm/MC/MCInstrDesc.h"
98#include "llvm/Support/Debug.h"
104#include <cassert>
105#include <cstdint>
106#include <iterator>
107#include <optional>
108#include <utility>
109
110using namespace llvm;
111using namespace PatternMatch;
112
113#define DEBUG_TYPE "isel"
114
// Pass statistics, reported under DEBUG_TYPE "isel" when -stats is enabled.
115STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
116 "target-independent selector");
117STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
118 "target-specific selector");
119STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
120
121/// Set the current block to which generated machine instructions will be
122/// appended.
// NOTE(review): the function signature and a few statements are missing from
// this extraction (embedded line numbers 123 and 132-133 are elided); the
// lines below are the visible remainder of FastISel::startNewBlock.
124 assert(LocalValueMap.empty() &&
125 "local values should be cleared after finishing a BB");
126
127 // Instructions are appended to FuncInfo.MBB. If the basic block already
128 // contains labels or copies, use the last instruction as the last local
129 // value.
130 EmitStartPt = nullptr;
131 if (!FuncInfo.MBB->empty())
// NOTE(review): the body of this 'if' is elided here; presumably it records
// the block's last existing instruction as EmitStartPt — confirm upstream.
134}
135
136void FastISel::finishBasicBlock() { flushLocalValueMap(); }
137
// NOTE(review): the signature of FastISel::lowerArguments and the condition
// guarding this first bail-out are elided in this extraction (lines 138-139,
// 148, 151 missing); only the visible remainder is shown.
140 // Fallback to SDISel argument lowering code to deal with sret pointer
141 // parameter.
142 return false;
143
// Give the target a chance to fast-lower the formal arguments; on failure
// the caller falls back to SelectionDAG argument lowering.
144 if (!fastLowerArguments())
145 return false;
146
147 // Enter arguments into ValueMap for uses in non-entry BBs.
149 E = FuncInfo.Fn->arg_end();
150 I != E; ++I) {
// Every argument must have been assigned a register by fastLowerArguments;
// copy that assignment into the function-wide ValueMap.
152 assert(VI != LocalValueMap.end() && "Missed an argument?");
153 FuncInfo.ValueMap[&*I] = VI->second;
154 }
155 return true;
156}
157
158/// Return the defined register if this instruction defines exactly one
159/// virtual register and uses no other virtual registers. Otherwise return
160/// Register();
// NOTE(review): the signature line (161) is elided in this extraction; from
// the body, the function takes a MachineInstr &MI.
162 Register RegDef;
163 for (const MachineOperand &MO : MI.operands()) {
164 if (!MO.isReg())
165 continue;
166 if (MO.isDef()) {
// A second def disqualifies the instruction from dead-value removal.
167 if (RegDef)
168 return Register();
169 RegDef = MO.getReg();
170 } else if (MO.getReg().isVirtual()) {
171 // This is another use of a vreg. Don't delete it.
172 return Register();
173 }
174 }
175 return RegDef;
176}
177
178static bool isRegUsedByPhiNodes(Register DefReg,
179 FunctionLoweringInfo &FuncInfo) {
180 for (auto &P : FuncInfo.PHINodesToUpdate)
181 if (P.second == DefReg)
182 return true;
183 return false;
184}
185
// Flush the local-value area at the end of a basic block: delete local value
// materializations that ended up unused, fix up debug locations, and reset
// the per-block bookkeeping.
// NOTE(review): several lines are elided in this extraction (189, 191,
// 194-195, 197, 199, 208, 226, 234-235); the visible remainder is unchanged.
186void FastISel::flushLocalValueMap() {
187 // If FastISel bails out, it could leave local value instructions behind
188 // that aren't used for anything. Detect and erase those.
190 // Save the first instruction after local values, for later.
192 ++FirstNonValue;
193
196 : FuncInfo.MBB->rend();
198 for (MachineInstr &LocalMI :
200 Register DefReg = findLocalRegDef(LocalMI);
201 if (!DefReg)
202 continue;
// Registers with pending fixups or pending PHI updates must be kept even
// if MRI currently records no (non-debug) uses.
203 if (FuncInfo.RegsWithFixups.count(DefReg))
204 continue;
205 bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
206 if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
207 if (EmitStartPt == &LocalMI)
209 LLVM_DEBUG(dbgs() << "removing dead local value materialization"
210 << LocalMI);
211 LocalMI.eraseFromParent();
212 }
213 }
214
215 if (FirstNonValue != FuncInfo.MBB->end()) {
216 // See if there are any local value instructions left. If so, we want to
217 // make sure the first one has a debug location; if it doesn't, use the
218 // first non-value instruction's debug location.
219
220 // If EmitStartPt is non-null, this block had copies at the top before
221 // FastISel started doing anything; it points to the last one, so the
222 // first local value instruction is the one after EmitStartPt.
223 // If EmitStartPt is null, the first local value instruction is at the
224 // top of the block.
225 MachineBasicBlock::iterator FirstLocalValue =
227 : FuncInfo.MBB->begin();
228 if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
229 FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
230 }
231 }
232
// Reset per-block state so the next block starts from a clean slate.
233 LocalValueMap.clear();
236 SavedInsertPt = FuncInfo.InsertPt;
237}
238
// FastISel::getRegForValue: return (creating/materializing if needed) the
// virtual register holding V.
// NOTE(review): the signature line (239) and two statements (258, 267) are
// elided in this extraction; the visible remainder is unchanged.
240 EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
241 // Don't handle non-simple values in FastISel.
242 if (!RealVT.isSimple())
243 return Register();
244
245 // Ignore illegal types. We must do this before looking up the value
246 // in ValueMap because Arguments are given virtual registers regardless
247 // of whether FastISel can handle them.
248 MVT VT = RealVT.getSimpleVT();
249 if (!TLI.isTypeLegal(VT)) {
250 // Handle integer promotions, though, because they're common and easy.
251 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
252 VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
253 else
254 return Register();
255 }
256
257 // Look up the value to see if we already have a register for it.
259 if (Reg)
260 return Reg;
261
262 // In bottom-up mode, just create the virtual register which will be used
263 // to hold the value. It will be materialized later.
264 if (isa<Instruction>(V) &&
265 (!isa<AllocaInst>(V) ||
266 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
268
// Constants and static allocas are materialized eagerly in the local value
// area so they can be reused within the block.
269 SavePoint SaveInsertPt = enterLocalValueArea();
270
271 // Materialize the value in a register. Emit any instructions in the
272 // local value area.
273 Reg = materializeRegForValue(V, VT);
274
275 leaveLocalValueArea(SaveInsertPt);
276
277 return Reg;
278}
279
// Target-independent constant materialization: emit instructions that place
// the constant V (of type VT) into a fresh virtual register. Returns an
// invalid Register on failure.
// NOTE(review): a few lines are elided in this extraction (291, 320,
// 322-323); the visible remainder is unchanged.
280Register FastISel::materializeConstant(const Value *V, MVT VT) {
281 Register Reg;
282 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
// Only constants representable in 64 bits are emitted as immediates.
283 if (CI->getValue().getActiveBits() <= 64)
284 Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
285 } else if (isa<AllocaInst>(V))
286 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
287 else if (isa<ConstantPointerNull>(V))
288 // Translate this as an integer zero so that it can be
289 // local-CSE'd with actual integer zeros.
290 Reg =
292 else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
293 if (CF->isNullValue())
294 Reg = fastMaterializeFloatZero(CF);
295 else
296 // Try to emit the constant directly.
297 Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
298
299 if (!Reg) {
300 // Try to emit the constant by using an integer constant with a cast.
301 const APFloat &Flt = CF->getValueAPF();
302 EVT IntVT = TLI.getPointerTy(DL);
303 uint32_t IntBitWidth = IntVT.getSizeInBits();
304 APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
305 bool isExact;
// Only use the int->fp route when the conversion is lossless.
306 (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
307 if (isExact) {
308 Register IntegerReg =
309 getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
310 if (IntegerReg)
311 Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
312 IntegerReg);
313 }
314 }
315 } else if (const auto *Op = dyn_cast<Operator>(V)) {
// Constant expressions are selected like ordinary operations.
316 if (!selectOperator(Op, Op->getOpcode()))
317 if (!isa<Instruction>(Op) ||
318 !fastSelectInstruction(cast<Instruction>(Op)))
319 return Register();
321 } else if (isa<UndefValue>(V)) {
// Undef becomes an IMPLICIT_DEF of a fresh register.
324 TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
325 }
326 return Reg;
327}
328
329/// Helper for getRegForValue. This function is called when the value isn't
330/// already available in a register and must be materialized with new
331/// instructions.
// NOTE(review): lines 333 and 346-347 are elided in this extraction; the
// visible remainder is unchanged.
332Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
334 // Give the target-specific code a try first.
335 if (isa<Constant>(V))
336 Reg = fastMaterializeConstant(cast<Constant>(V));
337
338 // If target-specific code couldn't or didn't want to handle the value, then
339 // give target-independent code a try.
340 if (!Reg)
341 Reg = materializeConstant(V, VT);
342
343 // Don't cache constant materializations in the general ValueMap.
344 // To do so would require tracking what uses they dominate.
345 if (Reg) {
348 }
349 return Reg;
350}
351
// FastISel::lookUpRegForValue: return the register already assigned to V, if
// any, checking the cross-block ValueMap first and the per-block
// LocalValueMap second.
// NOTE(review): the signature line (352) and the ValueMap lookup statement
// (357) are elided in this extraction.
353 // Look up the value to see if we already have a register for it. We
354 // cache values defined by Instructions across blocks, and other values
355 // only locally. This is because Instructions already have the SSA
356 // def-dominates-use requirement enforced.
358 if (I != FuncInfo.ValueMap.end())
359 return I->second;
360 return LocalValueMap[V];
361}
362
// Record that IR value I is now available in Reg (spanning NumRegs
// consecutive registers). If a different register was already assigned,
// schedule fixups so earlier uses are rewritten to the new register.
// NOTE(review): line 377 is elided in this extraction (likely the
// RegsWithFixups bookkeeping for Reg + i — confirm upstream).
363void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
// Non-instruction values (constants, arguments) are only valid locally.
364 if (!isa<Instruction>(I)) {
365 LocalValueMap[I] = Reg;
366 return;
367 }
368
369 Register &AssignedReg = FuncInfo.ValueMap[I];
370 if (!AssignedReg)
371 // Use the new register.
372 AssignedReg = Reg;
373 else if (Reg != AssignedReg) {
374 // Arrange for uses of AssignedReg to be replaced by uses of Reg.
375 for (unsigned i = 0; i < NumRegs; i++) {
376 FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
378 }
379
380 AssignedReg = Reg;
381 }
382}
383
// FastISel::getRegForGEPIndex: materialize a GEP index value in a register
// of pointer width, sign-extending or truncating as needed.
// NOTE(review): the signature lines (384-385) are elided in this extraction;
// from the body, the function takes a pointer-width MVT PtrVT and the index
// Value *Idx, and IdxN is obtained from getRegForValue(Idx).
386 if (!IdxN)
387 // Unhandled operand. Halt "fast" selection and bail.
388 return Register();
389
390 // If the index is smaller or larger than intptr_t, truncate or extend it.
391 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
392 if (IdxVT.bitsLT(PtrVT)) {
// GEP indices are signed, so widening must sign-extend.
393 IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
394 } else if (IdxVT.bitsGT(PtrVT)) {
395 IdxN =
396 fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
397 }
398 return IdxN;
399}
400
// FastISel::recomputeInsertPt: reposition the insertion point after the last
// local value instruction, or at the top of the block when there is none.
// NOTE(review): lines 401, 403, 405 and 407 are elided in this extraction;
// only the visible skeleton remains.
402 if (getLastLocalValue()) {
404 FuncInfo.MBB = FuncInfo.InsertPt->getParent();
406 } else
408}
409
// FastISel::removeDeadCode: erase the half-open instruction range [I, E),
// keeping the block-local bookkeeping pointers (SavedInsertPt, EmitStartPt,
// LastLocalValue) valid as instructions disappear.
// NOTE(review): the signature lines (410-411) and line 427 are elided in
// this extraction.
412 assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
413 "Invalid iterator!");
414 while (I != E) {
// If a tracked pointer refers to an instruction being erased, move it to
// the end of the range so it stays valid.
415 if (SavedInsertPt == I)
416 SavedInsertPt = E;
417 if (EmitStartPt == I)
418 EmitStartPt = E.isValid() ? &*E : nullptr;
419 if (LastLocalValue == I)
420 LastLocalValue = E.isValid() ? &*E : nullptr;
421
// Advance before erasing so the loop iterator is never invalidated.
422 MachineInstr *Dead = &*I;
423 ++I;
424 Dead->eraseFromParent();
425 ++NumFastIselDead;
426 }
428}
429
// FastISel::enterLocalValueArea: save the current insertion point and move
// it into the local value area; the returned SavePoint is later passed to
// leaveLocalValueArea.
// NOTE(review): the signature line (430) and line 432 are elided in this
// extraction.
431 SavePoint OldInsertPt = FuncInfo.InsertPt;
433 return OldInsertPt;
434}
435
// FastISel::leaveLocalValueArea: record the last instruction emitted into
// the local value area and restore the saved insertion point.
// NOTE(review): the signature line (436) and line 437 (the guard before
// updating LastLocalValue) are elided in this extraction.
438 LastLocalValue = &*std::prev(FuncInfo.InsertPt);
439
440 // Restore the previous insert position.
441 FuncInfo.InsertPt = OldInsertPt;
442}
443
444bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
445 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
446 if (VT == MVT::Other || !VT.isSimple())
447 // Unhandled type. Halt "fast" selection and bail.
448 return false;
449
450 // We only handle legal types. For example, on x86-32 the instruction
451 // selector contains all of the 64-bit instructions from x86-64,
452 // under the assumption that i64 won't be used if the target doesn't
453 // support it.
454 if (!TLI.isTypeLegal(VT)) {
455 // MVT::i1 is special. Allow AND, OR, or XOR because they
456 // don't require additional zeroing, which makes them easy.
457 if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
458 VT = TLI.getTypeToTransformTo(I->getContext(), VT);
459 else
460 return false;
461 }
462
463 // Check if the first operand is a constant, and handle it as "ri". At -O0,
464 // we don't have anything that canonicalizes operand order.
465 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
466 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
467 Register Op1 = getRegForValue(I->getOperand(1));
468 if (!Op1)
469 return false;
470
471 Register ResultReg =
472 fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
473 VT.getSimpleVT());
474 if (!ResultReg)
475 return false;
476
477 // We successfully emitted code for the given LLVM Instruction.
478 updateValueMap(I, ResultReg);
479 return true;
480 }
481
482 Register Op0 = getRegForValue(I->getOperand(0));
483 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
484 return false;
485
486 // Check if the second operand is a constant and handle it appropriately.
487 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
488 uint64_t Imm = CI->getSExtValue();
489
490 // Transform "sdiv exact X, 8" -> "sra X, 3".
491 if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
492 cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
493 Imm = Log2_64(Imm);
494 ISDOpcode = ISD::SRA;
495 }
496
497 // Transform "urem x, pow2" -> "and x, pow2-1".
498 if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
499 isPowerOf2_64(Imm)) {
500 --Imm;
501 ISDOpcode = ISD::AND;
502 }
503
504 Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
505 VT.getSimpleVT());
506 if (!ResultReg)
507 return false;
508
509 // We successfully emitted code for the given LLVM Instruction.
510 updateValueMap(I, ResultReg);
511 return true;
512 }
513
514 Register Op1 = getRegForValue(I->getOperand(1));
515 if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
516 return false;
517
518 // Now we have both operands in registers. Emit the instruction.
519 Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
520 ISDOpcode, Op0, Op1);
521 if (!ResultReg)
522 // Target-specific code wasn't able to find a machine opcode for
523 // the given ISD opcode and type. Halt "fast" selection and bail.
524 return false;
525
526 // We successfully emitted code for the given LLVM Instruction.
527 updateValueMap(I, ResultReg);
528 return true;
529}
530
// FastISel::selectGetElementPtr: lower a (scalar) GEP by accumulating
// constant offsets and emitting ADD/MUL for variable indices.
// NOTE(review): the signature line (531), the gep_type_iterator loop header
// (548) and the final updateValueMap call (609) are elided in this
// extraction; the visible remainder is unchanged.
532 Register N = getRegForValue(I->getOperand(0));
533 if (!N) // Unhandled operand. Halt "fast" selection and bail.
534 return false;
535
536 // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
537 // and bail.
538 if (isa<VectorType>(I->getType()))
539 return false;
540
541 // Keep a running tab of the total offset to coalesce multiple N = N + Offset
542 // into a single N = N + TotalOffset.
543 uint64_t TotalOffs = 0;
544 // FIXME: What's a good SWAG number for MaxOffs?
545 uint64_t MaxOffs = 2048;
546 MVT VT = TLI.getValueType(DL, I->getType()).getSimpleVT();
547
549 GTI != E; ++GTI) {
550 const Value *Idx = GTI.getOperand();
551 if (StructType *StTy = GTI.getStructTypeOrNull()) {
// Struct indices are always constant; fold the field offset directly.
552 uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
553 if (Field) {
554 // N = N + Offset
555 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
556 if (TotalOffs >= MaxOffs) {
557 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
558 if (!N) // Unhandled operand. Halt "fast" selection and bail.
559 return false;
560 TotalOffs = 0;
561 }
562 }
563 } else {
564 // If this is a constant subscript, handle it quickly.
565 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
566 if (CI->isZero())
567 continue;
568 // N = N + Offset
569 uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
570 TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
571 if (TotalOffs >= MaxOffs) {
572 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
573 if (!N) // Unhandled operand. Halt "fast" selection and bail.
574 return false;
575 TotalOffs = 0;
576 }
577 continue;
578 }
// A variable index: flush the accumulated constant offset first so the
// ADD/MUL below composes correctly.
579 if (TotalOffs) {
580 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
581 if (!N) // Unhandled operand. Halt "fast" selection and bail.
582 return false;
583 TotalOffs = 0;
584 }
585
586 // N = N + Idx * ElementSize;
587 uint64_t ElementSize = GTI.getSequentialElementStride(DL);
588 Register IdxN = getRegForGEPIndex(VT, Idx);
589 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
590 return false;
591
592 if (ElementSize != 1) {
593 IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
594 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
595 return false;
596 }
597 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
598 if (!N) // Unhandled operand. Halt "fast" selection and bail.
599 return false;
600 }
601 }
// Flush any remaining accumulated constant offset.
602 if (TotalOffs) {
603 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
604 if (!N) // Unhandled operand. Halt "fast" selection and bail.
605 return false;
606 }
607
608 // We successfully emitted code for the given LLVM Instruction.
610 return true;
611}
612
// Encode the live-variable operands of a stackmap/patchpoint call (starting
// at argument StartIdx) into Ops, using the StackMaps operand encoding.
// Returns false if any operand cannot be lowered.
// NOTE(review): lines 623 (the null-pointer immediate), 630 (the frame-index
// operand) and 634 (Register Reg = getRegForValue(Val)) are elided in this
// extraction.
613bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
614 const CallInst *CI, unsigned StartIdx) {
615 for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
616 Value *Val = CI->getArgOperand(i);
617 // Check for constants and encode them with a StackMaps::ConstantOp prefix.
618 if (const auto *C = dyn_cast<ConstantInt>(Val)) {
619 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
620 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
621 } else if (isa<ConstantPointerNull>(Val)) {
622 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
624 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
625 // Values coming from a stack location also require a special encoding,
626 // but that is added later on by the target specific frame index
627 // elimination implementation.
628 auto SI = FuncInfo.StaticAllocaMap.find(AI);
629 if (SI != FuncInfo.StaticAllocaMap.end())
631 else
632 return false;
633 } else {
// Any other value must already be (or become) available in a register.
635 if (!Reg)
636 return false;
637 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
638 }
639 }
640 return true;
641}
642
// FastISel::selectStackmap: lower @llvm.experimental.stackmap directly,
// without going through call lowering.
// NOTE(review): the signature line (643) and several statements (659, 685,
// 698, 705, 710) are elided in this extraction; the visible remainder is
// unchanged.
644 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
645 // [live variables...])
646 assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
647 "Stackmap cannot return a value.");
648
649 // The stackmap intrinsic only records the live variables (the arguments
650 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
651 // intrinsic, this won't be lowered to a function call. This means we don't
652 // have to worry about calling conventions and target-specific lowering code.
653 // Instead we perform the call lowering right here.
654 //
655 // CALLSEQ_START(0, 0...)
656 // STACKMAP(id, nbytes, ...)
657 // CALLSEQ_END(0, 0)
658 //
660
661 // Add the <id> and <numBytes> constants.
662 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
663 "Expected a constant integer.");
664 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
665 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
666
667 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
668 "Expected a constant integer.");
669 const auto *NumBytes =
670 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
671 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
672
673 // Push live variables for the stack map (skipping the first two arguments
674 // <id> and <numBytes>).
675 if (!addStackMapLiveVars(Ops, I, 2))
676 return false;
677
678 // We are not adding any register mask info here, because the stackmap doesn't
679 // clobber anything.
680
681 // Add scratch registers as implicit def and early clobber.
682 CallingConv::ID CC = I->getCallingConv();
683 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
684 for (unsigned i = 0; ScratchRegs[i]; ++i)
686 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
687 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
688
689 // Issue CALLSEQ_START
690 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
691 auto Builder =
692 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
// CALLSEQ_START takes a target-dependent number of zero immediates.
693 const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
694 for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
695 Builder.addImm(0);
696
697 // Issue STACKMAP.
699 TII.get(TargetOpcode::STACKMAP));
700 for (auto const &MO : Ops)
701 MIB.add(MO);
702
703 // Issue CALLSEQ_END
704 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
706 .addImm(0)
707 .addImm(0);
708
709 // Inform the Frame Information that we have a stackmap in this function.
711
712 return true;
713}
714
715/// Lower an argument list according to the target calling convention.
716///
717/// This is a helper for lowering intrinsics that follow a target calling
718/// convention or require stack pointer adjustment. Only a subset of the
719/// intrinsic's operands need to participate in the calling convention.
720bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
721 unsigned NumArgs, const Value *Callee,
722 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
723 ArgListTy Args;
724 Args.reserve(NumArgs);
725
726 // Populate the argument list.
727 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
728 Value *V = CI->getOperand(ArgI);
729
730 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
731
732 ArgListEntry Entry(V);
733 Entry.setAttributes(CI, ArgI);
734 Args.push_back(Entry);
735 }
736
737 Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
738 : CI->getType();
739 CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
740
741 return lowerCallTo(CLI);
742}
743
// CallLoweringInfo::setCallee (external-symbol overload): mangle the target
// name per the data layout, intern it as an MCSymbol, and forward to the
// symbol-based setCallee.
// NOTE(review): the first line of the signature (744) is elided in this
// extraction.
745 const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
746 StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
747 SmallString<32> MangledName;
748 Mangler::getNameWithPrefix(MangledName, Target, DL);
749 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
750 return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
751}
752
// FastISel::selectPatchpoint: lower @llvm.experimental.patchpoint by first
// lowering the underlying call, then replacing it with a PATCHPOINT
// instruction carrying the id/nbytes/target/cc operands, the call and live
// operands, scratch registers, and implicit defs.
// NOTE(review): the signature line (753) and a number of statements (766,
// 788, 795, 801, 831, 833, 865-866, 871, 881, 887, 893, 896) are elided in
// this extraction; the visible remainder is unchanged.
754 // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
755 // i32 <numBytes>,
756 // i8* <target>,
757 // i32 <numArgs>,
758 // [Args...],
759 // [live variables...])
760 CallingConv::ID CC = I->getCallingConv();
761 bool IsAnyRegCC = CC == CallingConv::AnyReg;
762 bool HasDef = !I->getType()->isVoidTy();
763 Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
764
765 // Check if we can lower the return type when using anyregcc.
767 if (IsAnyRegCC && HasDef) {
768 ValueType = TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
769 if (ValueType == MVT::Other)
770 return false;
771 }
772
773 // Get the real number of arguments participating in the call <numArgs>
774 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
775 "Expected a constant integer.");
776 const auto *NumArgsVal =
777 cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
778 unsigned NumArgs = NumArgsVal->getZExtValue();
779
780 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
781 // This includes all meta-operands up to but not including CC.
782 unsigned NumMetaOpers = PatchPointOpers::CCPos;
783 assert(I->arg_size() >= NumMetaOpers + NumArgs &&
784 "Not enough arguments provided to the patchpoint intrinsic");
785
786 // For AnyRegCC the arguments are lowered later on manually.
787 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
789 CLI.setIsPatchPoint();
790 if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
791 return false;
792
793 assert(CLI.Call && "No call instruction specified.");
794
796
797 // Add an explicit result reg if we use the anyreg calling convention.
798 if (IsAnyRegCC && HasDef) {
799 assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
800 assert(ValueType.isValid());
802 CLI.NumResultRegs = 1;
803 Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
804 }
805
806 // Add the <id> and <numBytes> constants.
807 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
808 "Expected a constant integer.");
809 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
810 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
811
812 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
813 "Expected a constant integer.");
814 const auto *NumBytes =
815 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
816 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
817
818 // Add the call target.
819 if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
// A direct inttoptr callee encodes the raw address as an immediate.
820 uint64_t CalleeConstAddr =
821 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
822 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
823 } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
824 if (C->getOpcode() == Instruction::IntToPtr) {
825 uint64_t CalleeConstAddr =
826 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
827 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
828 } else
829 llvm_unreachable("Unsupported ConstantExpr.");
830 } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
832 } else if (isa<ConstantPointerNull>(Callee))
834 else
835 llvm_unreachable("Unsupported callee address.");
836
837 // Adjust <numArgs> to account for any arguments that have been passed on
838 // the stack instead.
839 unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
840 Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
841
842 // Add the calling convention
843 Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
844
845 // Add the arguments we omitted previously. The register allocator should
846 // place these in any free register.
847 if (IsAnyRegCC) {
848 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
849 Register Reg = getRegForValue(I->getArgOperand(i));
850 if (!Reg)
851 return false;
852 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
853 }
854 }
855
856 // Push the arguments from the call instruction.
857 for (auto Reg : CLI.OutRegs)
858 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
859
860 // Push live variables for the stack map.
861 if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
862 return false;
863
864 // Push the register mask info.
867
868 // Add scratch registers as implicit def and early clobber.
869 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
870 for (unsigned i = 0; ScratchRegs[i]; ++i)
872 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
873 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
874
875 // Add implicit defs (return values).
876 for (auto Reg : CLI.InRegs)
877 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
878 /*isImp=*/true));
879
880 // Insert the patchpoint instruction before the call generated by the target.
882 TII.get(TargetOpcode::PATCHPOINT));
883
884 for (auto &MO : Ops)
885 MIB.add(MO);
886
888
889 // Delete the original call instruction.
890 CLI.Call->eraseFromParent();
891
892 // Inform the Frame Information that we have a patchpoint in this function.
894
895 if (CLI.NumResultRegs)
897 return true;
898}
899
// FastISel::selectXRayCustomEvent: emit a PATCHABLE_EVENT_CALL carrying the
// event-data operands; the pseudo is expanded later in the backend.
// NOTE(review): the signature line (900) and several statements (902,
// 904-905, 907, 909-910) are elided in this extraction — including the
// triple check condition and the operand setup for Ops.
901 const auto &Triple = TM.getTargetTriple();
903 return true; // don't do anything to this instruction.
906 /*isDef=*/false));
908 /*isDef=*/false));
911 TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
912 for (auto &MO : Ops)
913 MIB.add(MO);
914
915 // Insert the Patchable Event Call instruction, that gets lowered properly.
916 return true;
917}
918
// FastISel::selectXRayTypedEvent: like selectXRayCustomEvent, but emits a
// PATCHABLE_TYPED_EVENT_CALL with an extra type-id operand.
// NOTE(review): the signature line (919) and several statements (921,
// 923-924, 926, 928, 930-931) are elided in this extraction — including the
// triple check condition and the operand setup for Ops.
920 const auto &Triple = TM.getTargetTriple();
922 return true; // don't do anything to this instruction.
925 /*isDef=*/false));
927 /*isDef=*/false));
929 /*isDef=*/false));
932 TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
933 for (auto &MO : Ops)
934 MIB.add(MO);
935
936 // Insert the Patchable Typed Event Call instruction, that gets lowered properly.
937 return true;
938}
939
940/// Returns an AttributeList representing the attributes applied to the return
941/// value of the given call.
// NOTE(review): the function signature (942), the Attrs declaration (943)
// and the first line of the AttributeList::get call (951) are elided in this
// extraction.
944 if (CLI.RetSExt)
945 Attrs.push_back(Attribute::SExt);
946 if (CLI.RetZExt)
947 Attrs.push_back(Attribute::ZExt);
948 if (CLI.IsInReg)
949 Attrs.push_back(Attribute::InReg);
950
952 Attrs);
953}
954
955bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
956 unsigned NumArgs) {
957 MCContext &Ctx = MF->getContext();
958 SmallString<32> MangledName;
959 Mangler::getNameWithPrefix(MangledName, SymName, DL);
960 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
961 return lowerCallTo(CI, Sym, NumArgs);
962}
963
// FastISel::lowerCallTo (MCSymbol overload): build an argument list from the
// first NumArgs operands of CI and lower the call to Symbol.
// NOTE(review): the first line of the signature (964), a blank-section line
// (983) and the CallLoweringInfo declaration (985) are elided in this
// extraction.
965 unsigned NumArgs) {
966 FunctionType *FTy = CI->getFunctionType();
967 Type *RetTy = CI->getType();
968
969 ArgListTy Args;
970 Args.reserve(NumArgs);
971
972 // Populate the argument list.
973 // Attributes for args start at offset 1, after the return attribute.
974 for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
975 Value *V = CI->getOperand(ArgI);
976
977 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
978
979 ArgListEntry Entry(V);
980 Entry.setAttributes(CI, ArgI);
981 Args.push_back(Entry);
982 }
984
986 CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);
987
988 return lowerCallTo(CLI);
989}
990
// Main CallLoweringInfo-based call lowering: computes return-value and
// outgoing-argument flags, then hands off to the target's fastLowerCall.
// Returns false (bailing to SelectionDAG) if the return cannot be lowered
// directly or the target hook fails.
// NOTE(review): this extraction is missing the signature line and a few
// interior lines (e.g. the `Outs` declaration and the register-implicit-def
// bookkeeping around lines 997/1019/1097/1100) — verify against the
// canonical source before editing.
992 // Handle the incoming return values from the call.
 993 CLI.clearIns();
 994 SmallVector<EVT, 4> RetTys;
 995 ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
 996
 998 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
 999
 1000 bool CanLowerReturn = TLI.CanLowerReturn(
 1001 CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext(), CLI.RetTy);
 1002
 1003 // FIXME: sret demotion isn't supported yet - bail out.
 1004 if (!CanLowerReturn)
 1005 return false;
 1006
// Expand each return EVT into its (possibly multiple) legal register types
// and record an InputArg per register, carrying the sext/zext/inreg flags.
 1007 for (EVT VT : RetTys) {
 1008 MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
 1009 unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
 1010 for (unsigned i = 0; i != NumRegs; ++i) {
 1011 ISD::ArgFlagsTy Flags;
 1012 if (CLI.RetSExt)
 1013 Flags.setSExt();
 1014 if (CLI.RetZExt)
 1015 Flags.setZExt();
 1016 if (CLI.IsInReg)
 1017 Flags.setInReg();
 1018 ISD::InputArg Ret(Flags, RegisterVT, VT, CLI.RetTy, CLI.IsReturnValueUsed,
 1020 CLI.Ins.push_back(Ret);
 1021 }
 1022 }
 1023
 1024 // Handle all of the outgoing arguments.
 1025 CLI.clearOuts();
 1026 for (auto &Arg : CLI.getArgs()) {
 1027 Type *FinalType = Arg.Ty;
// byval arguments are passed by pointer; classify based on the pointee type.
 1028 if (Arg.IsByVal)
 1029 FinalType = Arg.IndirectType;
 1031 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
 1032
 1033 ISD::ArgFlagsTy Flags;
 1034 if (Arg.IsZExt)
 1035 Flags.setZExt();
 1036 if (Arg.IsSExt)
 1037 Flags.setSExt();
 1038 if (Arg.IsInReg)
 1039 Flags.setInReg();
 1040 if (Arg.IsSRet)
 1041 Flags.setSRet();
 1042 if (Arg.IsSwiftSelf)
 1043 Flags.setSwiftSelf();
 1044 if (Arg.IsSwiftAsync)
 1045 Flags.setSwiftAsync();
 1046 if (Arg.IsSwiftError)
 1047 Flags.setSwiftError();
 1048 if (Arg.IsCFGuardTarget)
 1049 Flags.setCFGuardTarget();
 1050 if (Arg.IsByVal)
 1051 Flags.setByVal();
 1052 if (Arg.IsInAlloca) {
 1053 Flags.setInAlloca();
 1054 // Set the byval flag for CCAssignFn callbacks that don't know about
 1055 // inalloca. This way we can know how many bytes we should've allocated
 1056 // and how many bytes a callee cleanup function will pop. If we port
 1057 // inalloca to more targets, we'll have to add custom inalloca handling in
 1058 // the various CC lowering callbacks.
 1059 Flags.setByVal();
 1060 }
 1061 if (Arg.IsPreallocated) {
 1062 Flags.setPreallocated();
 1063 // Set the byval flag for CCAssignFn callbacks that don't know about
 1064 // preallocated. This way we can know how many bytes we should've
 1065 // allocated and how many bytes a callee cleanup function will pop. If we
 1066 // port preallocated to more targets, we'll have to add custom
 1067 // preallocated handling in the various CC lowering callbacks.
 1068 Flags.setByVal();
 1069 }
 1070 MaybeAlign MemAlign = Arg.Alignment;
 1071 if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
 1072 unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);
 1073
 1074 // For ByVal, alignment should come from FE. BE will guess if this info
 1075 // is not there, but there are cases it cannot get right.
 1076 if (!MemAlign)
 1077 MemAlign = TLI.getByValTypeAlignment(Arg.IndirectType, DL);
 1078 Flags.setByValSize(FrameSize);
 1079 } else if (!MemAlign) {
 1080 MemAlign = DL.getABITypeAlign(Arg.Ty);
 1081 }
 1082 Flags.setMemAlign(*MemAlign);
 1083 if (Arg.IsNest)
 1084 Flags.setNest();
 1085 if (NeedsRegBlock)
 1086 Flags.setInConsecutiveRegs();
 1087 Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
 1088 CLI.OutVals.push_back(Arg.Val);
 1089 CLI.OutFlags.push_back(Flags);
 1090 }
 1091
// Target-specific lowering; failure falls back to SelectionDAG.
 1092 if (!fastLowerCall(CLI))
 1093 return false;
 1094
 1095 // Set all unused physreg defs as dead.
 1096 assert(CLI.Call && "No call instruction specified.");
 1098
 1099 if (CLI.NumResultRegs && CLI.CB)
 1101
 1102 // Set labels for heapallocsite call.
 1103 if (CLI.CB)
 1104 if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
 1105 CLI.Call->setHeapAllocMarker(*MF, MD);
 1106
 1107 return true;
 1108}
1109
// Lower an ordinary CallInst: builds the ArgListTy from the call-site
// operands (skipping empty types), decides tail-call eligibility, and
// delegates to lowerCallTo(CallLoweringInfo&).
// NOTE(review): the signature line (original line 1110) is missing from this
// extraction.
1111 FunctionType *FuncTy = CI->getFunctionType();
 1112 Type *RetTy = CI->getType();
 1113
 1114 ArgListTy Args;
 1115 Args.reserve(CI->arg_size());
 1116
 1117 for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
 1118 Value *V = *i;
 1119
 1120 // Skip empty types
 1121 if (V->getType()->isEmptyTy())
 1122 continue;
 1123
 1124 ArgListEntry Entry(V);
 1125 // Skip the first return-type Attribute to get to params.
 1126 Entry.setAttributes(CI, i - CI->arg_begin());
 1127 Args.push_back(Entry);
 1128 }
 1129
 1130 // Check if target-independent constraints permit a tail call here.
 1131 // Target-dependent constraints are checked within fastLowerCall.
 1132 bool IsTailCall = CI->isTailCall();
 1133 if (IsTailCall && !isInTailCallPosition(*CI, TM))
 1134 IsTailCall = false;
// "disable-tail-calls" suppresses optional tail calls but must not override
// musttail, which is semantically required.
 1135 if (IsTailCall && !CI->isMustTailCall() &&
 1136 MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
 1137 IsTailCall = false;
 1138
 1139 CallLoweringInfo CLI;
 1140 CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
 1141 .setTailCall(IsTailCall);
 1142
// On success, emit any "dontcall" attribute diagnostics for this call site.
 1143 if (lowerCallTo(CLI)) {
 1144 diagnoseDontCall(*CI);
 1145 return true;
 1146 }
 1147
 1148 return false;
 1149}
1150
// Select a call instruction: handles constraint-free inline asm inline,
// routes intrinsics to selectIntrinsicCall, and everything else to
// lowerCall.
// NOTE(review): the signature line (original line 1151) and two interior
// lines (1162, 1169 — presumably the HasSideEffects flag update and the
// BuildMI line for the INLINEASM) are missing from this extraction.
1152 const CallInst *Call = cast<CallInst>(I);
 1153
 1154 // Handle simple inline asms.
 1155 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
 1156 // Don't attempt to handle constraints.
 1157 if (!IA->getConstraintString().empty())
 1158 return false;
 1159
// Pack the asm's properties into the INLINEASM extra-info immediate operand.
 1160 unsigned ExtraInfo = 0;
 1161 if (IA->hasSideEffects())
 1163 if (IA->isAlignStack())
 1164 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
 1165 if (Call->isConvergent())
 1166 ExtraInfo |= InlineAsm::Extra_IsConvergent;
 1167 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
 1168
 1170 TII.get(TargetOpcode::INLINEASM));
 1171 MIB.addExternalSymbol(IA->getAsmString().data());
 1172 MIB.addImm(ExtraInfo);
 1173
// Preserve the !srcloc metadata so asm diagnostics can point at the source.
 1174 const MDNode *SrcLoc = Call->getMetadata("srcloc");
 1175 if (SrcLoc)
 1176 MIB.addMetadata(SrcLoc);
 1177
 1178 return true;
 1179 }
 1180
 1181 // Handle intrinsic function calls.
 1182 if (const auto *II = dyn_cast<IntrinsicInst>(Call))
 1183 return selectIntrinsicCall(II);
 1184
 1185 return lowerCall(Call);
 1186}
1187
// Lower the debug records (DbgVariableRecord / DbgLabelRecord) attached to an
// instruction into DBG_LABEL / DBG_VALUE-style machine instructions.
// NOTE(review): the signature line (original line 1188) and a few interior
// lines (1198, 1215-1216, 1220) are missing from this extraction; the
// condition distinguishing dbg.value-style from dbg.declare-style records is
// among the dropped lines.
1189 if (!II->hasDbgRecords())
 1190 return;
 1191
 1192 // Clear any metadata.
 1193 MIMD = MIMetadata();
 1194
 1195 // Reverse order of debug records, because fast-isel walks through backwards.
 1196 for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
 1197 flushLocalValueMap();
 1199
 1200 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
 1201 assert(DLR->getLabel() && "Missing label");
 1202 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DLR->getDebugLoc(),
 1203 TII.get(TargetOpcode::DBG_LABEL))
 1204 .addMetadata(DLR->getLabel());
 1205 continue;
 1206 }
 1207
 1208 DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
 1209
// Single-location records pass their one operand through; variadic
// (arg-list) records leave V null.
 1210 Value *V = nullptr;
 1211 if (!DVR.hasArgList())
 1212 V = DVR.getVariableLocationOp(0);
 1213
 1214 bool Res = false;
 1217 Res = lowerDbgValue(V, DVR.getExpression(), DVR.getVariable(),
 1218 DVR.getDebugLoc());
 1219 } else {
// Declares already materialized during argument lowering are skipped here.
 1221 if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
 1222 continue;
 1223 Res = lowerDbgDeclare(V, DVR.getExpression(), DVR.getVariable(),
 1224 DVR.getDebugLoc());
 1225 }
 1226
 1227 if (!Res)
 1228 LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DVR << "\n");
 1229 }
 1230}
1231
// Lower a dbg.value-style record to a DBG_VALUE (or DBG_INSTR_REF when
// instruction referencing is enabled). Handles, in order: undef/null values,
// integer constants (with optional expression constant-folding), FP
// constants, entry-value arguments, static allocas (as frame indices), and
// values already assigned a vreg. Returns false if the location must be
// dropped.
// NOTE(review): the opening signature line (original line 1232) and several
// interior lines (1247, 1253, 1261, 1303, 1308, 1310 — mostly BuildMI calls
// and the operand-vector setup for DBG_INSTR_REF) are missing from this
// extraction.
1233 DILocalVariable *Var, const DebugLoc &DL) {
 1234 // This form of DBG_VALUE is target-independent.
 1235 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
 1236 if (!V || isa<UndefValue>(V)) {
 1237 // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
 1238 // undef DBG_VALUE to terminate any prior location.
 1239 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
 1240 return true;
 1241 }
 1242 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
 1243 // See if there's an expression to constant-fold.
 1244 if (Expr)
 1245 std::tie(Expr, CI) = Expr->constantFold(CI);
// Wide integers must be emitted as CImm operands; <=64-bit fit in an Imm.
 1246 if (CI->getBitWidth() > 64)
 1248 .addCImm(CI)
 1249 .addImm(0U)
 1250 .addMetadata(Var)
 1251 .addMetadata(Expr);
 1252 else
 1254 .addImm(CI->getZExtValue())
 1255 .addImm(0U)
 1256 .addMetadata(Var)
 1257 .addMetadata(Expr);
 1258 return true;
 1259 }
 1260 if (const auto *CF = dyn_cast<ConstantFP>(V)) {
 1262 .addFPImm(CF)
 1263 .addImm(0U)
 1264 .addMetadata(Var)
 1265 .addMetadata(Expr);
 1266 return true;
 1267 }
 1268 if (const auto *Arg = dyn_cast<Argument>(V);
 1269 Arg && Expr && Expr->isEntryValue()) {
 1270 // As per the Verifier, this case is only valid for swift async Args.
 1271 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
 1272
// Entry values must reference the physical register the argument arrived
// in; search the function's livein (physreg, vreg) pairs for a match.
 1273 Register Reg = getRegForValue(Arg);
 1274 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
 1275 if (Reg == VirtReg || Reg == PhysReg) {
 1276 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
 1277 PhysReg, Var, Expr);
 1278 return true;
 1279 }
 1280
 1281 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
 1282 "couldn't find a physical register\n");
 1283 return false;
 1284 }
 1285 if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
 1286 SI != FuncInfo.StaticAllocaMap.end()) {
 1287 MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
 1288 bool IsIndirect = false;
 1289 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
 1290 Var, Expr);
 1291 return true;
 1292 }
 1293 if (Register Reg = lookUpRegForValue(V)) {
 1294 // FIXME: This does not handle register-indirect values at offset 0.
 1295 if (!FuncInfo.MF->useDebugInstrRef()) {
 1296 bool IsIndirect = false;
 1297 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
 1298 Expr);
 1299 return true;
 1300 }
 1301 // If using instruction referencing, produce this as a DBG_INSTR_REF,
 1302 // to be later patched up by finalizeDebugInstrRefs.
 1304 /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
 1305 /* isKill */ false, /* isDead */ false,
 1306 /* isUndef */ false, /* isEarlyClobber */ false,
 1307 /* SubReg */ 0, /* isDebug */ true)});
 1309 auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
 1311 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
 1312 Var, NewExpr);
 1313 return true;
 1314 }
 1315 return false;
 1316}
1317
// Lower a dbg.declare-style record: describe the *address* of a variable,
// either as an indirect DBG_VALUE or as a DBG_INSTR_REF with a deref
// prepended to the expression. Returns false when no machine operand can
// represent the address without generating code.
// NOTE(review): the opening signature line (original line 1318) and several
// interior lines (1326, 1343-1344, 1347-1348, 1353, 1356, 1364) are missing
// from this extraction — notably the condition guarding the first
// CreateReg and the InitializeRegForValue fallback.
1319 DILocalVariable *Var, const DebugLoc &DL) {
 1320 if (!Address || isa<UndefValue>(Address)) {
 1321 LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
 1322 return false;
 1323 }
 1324
 1325 std::optional<MachineOperand> Op;
 1327 Op = MachineOperand::CreateReg(Reg, false);
 1328
 1329 // If we have a VLA that has a "use" in a metadata node that's then used
 1330 // here but it has no other uses, then we have a problem. E.g.,
 1331 //
 1332 // int foo (const int *x) {
 1333 // char a[*x];
 1334 // return 0;
 1335 // }
 1336 //
 1337 // If we assign 'a' a vreg and fast isel later on has to use the selection
 1338 // DAG isel, it will want to copy the value to the vreg. However, there are
 1339 // no uses, which goes counter to what selection DAG isel expects.
 1340 if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
 1341 (!isa<AllocaInst>(Address) ||
 1342 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
 1344 false);
 1345
 1346 if (Op) {
 1348 "Expected inlined-at fields to agree");
 1349 if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
 1350 // If using instruction referencing, produce this as a DBG_INSTR_REF,
 1351 // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
 1352 // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
 1354 {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
 1355 auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
 1357 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
 1358 Var, NewExpr);
 1359 return true;
 1360 }
 1361
 1362 // A dbg.declare describes the address of a source variable, so lower it
 1363 // into an indirect DBG_VALUE.
 1365 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
 1366 Expr);
 1367 return true;
 1368 }
 1369
 1370 // We can't yet handle anything else here because it would require
 1371 // generating code, thus altering codegen because of debug info.
 1372 LLVM_DEBUG(
 1373 dbgs() << "Dropping debug info (no materialized reg for address)\n");
 1374 return false;
 1375}
1376
// Select intrinsic calls that fast-isel can fold away or handle generically;
// anything not matched here is passed to the target's
// fastLowerIntrinsicCall.
// NOTE(review): the signature line (original line 1377) is missing from this
// extraction.
1378 switch (II->getIntrinsicID()) {
 1379 default:
 1380 break;
 1381 // At -O0 we don't care about the lifetime intrinsics.
 1382 case Intrinsic::lifetime_start:
 1383 case Intrinsic::lifetime_end:
 1384 // The donothing intrinsic does, well, nothing.
 1385 case Intrinsic::donothing:
 1386 // Neither does the sideeffect intrinsic.
 1387 case Intrinsic::sideeffect:
 1388 // Neither does the assume intrinsic; it's also OK not to codegen its operand.
 1389 case Intrinsic::assume:
 1390 // Neither does the llvm.experimental.noalias.scope.decl intrinsic
 1391 case Intrinsic::experimental_noalias_scope_decl:
 1392 return true;
 1393 case Intrinsic::objectsize:
 1394 llvm_unreachable("llvm.objectsize.* should have been lowered already");
 1395
 1396 case Intrinsic::is_constant:
 1397 llvm_unreachable("llvm.is.constant.* should have been lowered already");
 1398
// At -O0 the check intrinsics are treated as always-allowed: fold to true.
 1399 case Intrinsic::allow_runtime_check:
 1400 case Intrinsic::allow_ubsan_check: {
 1401 Register ResultReg = getRegForValue(ConstantInt::getTrue(II->getType()));
 1402 if (!ResultReg)
 1403 return false;
 1404 updateValueMap(II, ResultReg);
 1405 return true;
 1406 }
 1407
// These are identity operations at -O0: forward the first operand's reg.
 1408 case Intrinsic::launder_invariant_group:
 1409 case Intrinsic::strip_invariant_group:
 1410 case Intrinsic::expect:
 1411 case Intrinsic::expect_with_probability: {
 1412 Register ResultReg = getRegForValue(II->getArgOperand(0));
 1413 if (!ResultReg)
 1414 return false;
 1415 updateValueMap(II, ResultReg);
 1416 return true;
 1417 }
 1418 case Intrinsic::fake_use:
 1419 // At -O0, we don't need fake use, so just ignore it.
 1420 return true;
 1421 case Intrinsic::experimental_stackmap:
 1422 return selectStackmap(II);
 1423 case Intrinsic::experimental_patchpoint_void:
 1424 case Intrinsic::experimental_patchpoint:
 1425 return selectPatchpoint(II);
 1426
 1427 case Intrinsic::xray_customevent:
 1428 return selectXRayCustomEvent(II);
 1429 case Intrinsic::xray_typedevent:
 1430 return selectXRayTypedEvent(II);
 1431 }
 1432
 1433 return fastLowerIntrinsicCall(II);
 1434}
1435
1436bool FastISel::selectCast(const User *I, unsigned Opcode) {
1437 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1438 EVT DstVT = TLI.getValueType(DL, I->getType());
1439
1440 if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1441 !DstVT.isSimple())
1442 // Unhandled type. Halt "fast" selection and bail.
1443 return false;
1444
1445 // Check if the destination type is legal.
1446 if (!TLI.isTypeLegal(DstVT))
1447 return false;
1448
1449 // Check if the source operand is legal.
1450 if (!TLI.isTypeLegal(SrcVT))
1451 return false;
1452
1453 Register InputReg = getRegForValue(I->getOperand(0));
1454 if (!InputReg)
1455 // Unhandled operand. Halt "fast" selection and bail.
1456 return false;
1457
1458 Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1459 Opcode, InputReg);
1460 if (!ResultReg)
1461 return false;
1462
1463 updateValueMap(I, ResultReg);
1464 return true;
1465}
1466
// Select a bitcast: reuse the operand's register when source and destination
// map to the same MVT, otherwise emit an ISD::BITCAST via fastEmit_r.
// NOTE(review): the signature line (original line 1467) is missing from this
// extraction.
1468 EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
 1469 EVT DstEVT = TLI.getValueType(DL, I->getType());
 1470 if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
 1471 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
 1472 // Unhandled type. Halt "fast" selection and bail.
 1473 return false;
 1474
 1475 MVT SrcVT = SrcEVT.getSimpleVT();
 1476 MVT DstVT = DstEVT.getSimpleVT();
 1477 Register Op0 = getRegForValue(I->getOperand(0));
 1478 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
 1479 return false;
 1480
 1481 // If the bitcast doesn't change the type, just use the operand value.
 1482 if (SrcVT == DstVT) {
 1483 updateValueMap(I, Op0);
 1484 return true;
 1485 }
 1486
 1487 // Otherwise, select a BITCAST opcode.
 1488 Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
 1489 if (!ResultReg)
 1490 return false;
 1491
 1492 updateValueMap(I, ResultReg);
 1493 return true;
 1494}
1495
// Select a freeze instruction by copying the operand into a fresh register
// of the operand's register class (a plain COPY is sufficient at -O0).
// NOTE(review): the signature line (original line 1496) and the BuildMI line
// (1510) are missing from this extraction.
1497 Register Reg = getRegForValue(I->getOperand(0));
 1498 if (!Reg)
 1499 // Unhandled operand.
 1500 return false;
 1501
 1502 EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
 1503 if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
 1504 // Unhandled type, bail out.
 1505 return false;
 1506
 1507 MVT Ty = ETy.getSimpleVT();
 1508 const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
 1509 Register ResultReg = createResultReg(TyRegClass);
 1511 TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
 1512
 1513 updateValueMap(I, ResultReg);
 1514 return true;
 1515}
1516
1517// Remove local value instructions starting from the instruction after
1518// SavedLastLocalValue to the current function insert point.
1519void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1520{
1521 MachineInstr *CurLastLocalValue = getLastLocalValue();
1522 if (CurLastLocalValue != SavedLastLocalValue) {
1523 // Find the first local value instruction to be deleted.
1524 // This is the instruction after SavedLastLocalValue if it is non-NULL.
1525 // Otherwise it's the first instruction in the block.
1526 MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1527 if (SavedLastLocalValue)
1528 ++FirstDeadInst;
1529 else
1530 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1531 setLastLocalValue(SavedLastLocalValue);
1532 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1533 }
1534}
1535
// Top-level per-instruction driver: tries target-independent selection
// (selectOperator), then the target hook (fastSelectInstruction), cleaning
// up dead code and undoing PHI updates on failure so SelectionDAG can
// re-select the instruction.
// NOTE(review): the signature line (original line 1536) and a few interior
// lines (1574, 1584, 1591, 1603, 1613) are missing from this extraction —
// including part of the library-function condition and the closing braces
// of the dead-code-removal scopes.
1537 // Flush the local value map before starting each instruction.
 1538 // This improves locality and debugging, and can reduce spills.
 1539 // Reuse of values across IR instructions is relatively uncommon.
 1540 flushLocalValueMap();
 1541
 1542 MachineInstr *SavedLastLocalValue = getLastLocalValue();
 1543 // Just before the terminator instruction, insert instructions to
 1544 // feed PHI nodes in successor blocks.
 1545 if (I->isTerminator()) {
 1546 if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
 1547 // PHI node handling may have generated local value instructions,
 1548 // even though it failed to handle all PHI nodes.
 1549 // We remove these instructions because SelectionDAGISel will generate
 1550 // them again.
 1551 removeDeadLocalValueCode(SavedLastLocalValue);
 1552 return false;
 1553 }
 1554 }
 1555
 1556 // FastISel does not handle any operand bundles except OB_funclet.
 1557 if (auto *Call = dyn_cast<CallBase>(I))
 1558 for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
 1559 if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
 1560 return false;
 1561
// Capture the instruction's debug location/metadata for all MIs we emit.
 1562 MIMD = MIMetadata(*I);
 1563
 1564 SavedInsertPt = FuncInfo.InsertPt;
 1565
 1566 if (const auto *Call = dyn_cast<CallInst>(I)) {
 1567 const Function *F = Call->getCalledFunction();
 1568 LibFunc Func;
 1569
 1570 // As a special case, don't handle calls to builtin library functions that
 1571 // may be translated directly to target instructions.
 1572 if (F && !F->hasLocalLinkage() && F->hasName() &&
 1573 LibInfo->getLibFunc(F->getName(), Func) &&
 1575 return false;
 1576
 1577 // Don't handle Intrinsic::trap if a trap function is specified.
 1578 if (F && F->getIntrinsicID() == Intrinsic::trap &&
 1579 Call->hasFnAttr("trap-func-name"))
 1580 return false;
 1581 }
 1582
 1583 // First, try doing target-independent selection.
 1585 if (selectOperator(I, I->getOpcode())) {
 1586 ++NumFastIselSuccessIndependent;
 1587 MIMD = {};
 1588 return true;
 1589 }
 1590 // Remove dead code.
 1592 if (SavedInsertPt != FuncInfo.InsertPt)
 1593 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
 1594 SavedInsertPt = FuncInfo.InsertPt;
 1595 }
 1596 // Next, try calling the target to attempt to handle the instruction.
 1597 if (fastSelectInstruction(I)) {
 1598 ++NumFastIselSuccessTarget;
 1599 MIMD = {};
 1600 return true;
 1601 }
 1602 // Remove dead code.
 1604 if (SavedInsertPt != FuncInfo.InsertPt)
 1605 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
 1606
 1607 MIMD = {};
 1608 // Undo phi node updates, because they will be added again by SelectionDAG.
 1609 if (I->isTerminator()) {
 1610 // PHI node handling may have generated local value instructions.
 1611 // We remove them because SelectionDAGISel will generate them again.
 1612 removeDeadLocalValueCode(SavedLastLocalValue);
 1614 }
 1615 return false;
 1616}
1617
1618/// Emit an unconditional branch to the given block, unless it is the immediate
 1619/// (fall-through) successor, and update the CFG.
// NOTE(review): the signature line (original line 1620) and the lines
// inserting the fall-through branch operands and the successor-probability
// bookkeeping (1631, 1634-1636, 1638) are missing from this extraction.
 1621 const DebugLoc &DbgLoc) {
 1622 const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
 1623 bool BlockHasMultipleInstrs = &BB->front() != &BB->back();
 1624 if (BlockHasMultipleInstrs && FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
 1625 // For more accurate line information if this is the only non-debug
 1626 // instruction in the block then emit it, otherwise we have the
 1627 // unconditional fall-through case, which needs no instructions.
 1628 } else {
 1629 // The unconditional branch case.
 1630 TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
 1632 }
// Record MSucc as a CFG successor, with an edge probability when branch
// probability info is available.
 1633 if (FuncInfo.BPI) {
 1637 } else
 1639}
1640
// Finish lowering a conditional branch: register TrueMBB as a successor
// (with probability if available) and emit the branch to FalseMBB.
// NOTE(review): the signature line (original line 1641) and the
// addSuccessor calls (1651, 1653) are missing from this extraction.
1642 MachineBasicBlock *TrueMBB,
 1643 MachineBasicBlock *FalseMBB) {
 1644 // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
 1645 // happen in degenerate IR and MachineIR forbids to have a block twice in the
 1646 // successor/predecessor lists.
 1647 if (TrueMBB != FalseMBB) {
 1648 if (FuncInfo.BPI) {
 1649 auto BranchProbability =
 1650 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
 1652 } else
 1654 }
 1655
 1656 fastEmitBranch(FalseMBB, MIMD.getDL());
 1657}
1658
1659/// Emit an FNeg operation.
1660bool FastISel::selectFNeg(const User *I, const Value *In) {
1661 Register OpReg = getRegForValue(In);
1662 if (!OpReg)
1663 return false;
1664
1665 // If the target has ISD::FNEG, use it.
1666 EVT VT = TLI.getValueType(DL, I->getType());
1667 Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1668 OpReg);
1669 if (ResultReg) {
1670 updateValueMap(I, ResultReg);
1671 return true;
1672 }
1673
1674 // Bitcast the value to integer, twiddle the sign bit with xor,
1675 // and then bitcast it back to floating-point.
1676 if (VT.getSizeInBits() > 64)
1677 return false;
1678 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1679 if (!TLI.isTypeLegal(IntVT))
1680 return false;
1681
1682 Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1683 ISD::BITCAST, OpReg);
1684 if (!IntReg)
1685 return false;
1686
1687 Register IntResultReg = fastEmit_ri_(
1688 IntVT.getSimpleVT(), ISD::XOR, IntReg,
1689 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1690 if (!IntResultReg)
1691 return false;
1692
1693 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1694 IntResultReg);
1695 if (!ResultReg)
1696 return false;
1697
1698 updateValueMap(I, ResultReg);
1699 return true;
1700}
1701
// Select extractvalue by computing the extracted element's register as an
// offset from the aggregate's base register — no code is emitted, only the
// value map is updated.
// NOTE(review): the signature line (original line 1702) and the ValueMap
// lookup line (1721, presumably `auto I = FuncInfo.ValueMap.find(Op0);`) are
// missing from this extraction.
1703 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
 1704 if (!EVI)
 1705 return false;
 1706
 1707 // Make sure we only try to handle extracts with a legal result. But also
 1708 // allow i1 because it's easy.
 1709 EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
 1710 if (!RealVT.isSimple())
 1711 return false;
 1712 MVT VT = RealVT.getSimpleVT();
 1713 if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
 1714 return false;
 1715
 1716 const Value *Op0 = EVI->getOperand(0);
 1717 Type *AggTy = Op0->getType();
 1718
 1719 // Get the base result register.
 1720 Register ResultReg;
 1722 if (I != FuncInfo.ValueMap.end())
 1723 ResultReg = I->second;
 1724 else if (isa<Instruction>(Op0))
 1725 ResultReg = FuncInfo.InitializeRegForValue(Op0);
 1726 else
 1727 return false; // fast-isel can't handle aggregate constants at the moment
 1728
 1729 // Get the actual result register, which is an offset from the base register.
 1730 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
 1731
 1732 SmallVector<EVT, 4> AggValueVTs;
 1733 ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
 1734
// Registers for aggregate members are allocated consecutively; skip past
// the registers used by the members preceding the extracted one.
 1735 for (unsigned i = 0; i < VTIndex; i++)
 1736 ResultReg = ResultReg.id() +
 1737 TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
 1738
 1739 updateValueMap(EVI, ResultReg);
 1740 return true;
 1741}
1742
// Target-independent dispatch on the IR opcode: binary ops map to the
// corresponding ISD node via selectBinaryOp; the remaining instructions go
// to their dedicated select* helpers. Returning false hands the
// instruction to target selection / SelectionDAG.
// NOTE(review): one line of the Unreachable case (original line 1806,
// the second argument to shouldLowerToTrap) is missing from this extraction.
1743bool FastISel::selectOperator(const User *I, unsigned Opcode) {
 1744 switch (Opcode) {
 1745 case Instruction::Add:
 1746 return selectBinaryOp(I, ISD::ADD);
 1747 case Instruction::FAdd:
 1748 return selectBinaryOp(I, ISD::FADD);
 1749 case Instruction::Sub:
 1750 return selectBinaryOp(I, ISD::SUB);
 1751 case Instruction::FSub:
 1752 return selectBinaryOp(I, ISD::FSUB);
 1753 case Instruction::Mul:
 1754 return selectBinaryOp(I, ISD::MUL);
 1755 case Instruction::FMul:
 1756 return selectBinaryOp(I, ISD::FMUL);
 1757 case Instruction::SDiv:
 1758 return selectBinaryOp(I, ISD::SDIV);
 1759 case Instruction::UDiv:
 1760 return selectBinaryOp(I, ISD::UDIV);
 1761 case Instruction::FDiv:
 1762 return selectBinaryOp(I, ISD::FDIV);
 1763 case Instruction::SRem:
 1764 return selectBinaryOp(I, ISD::SREM);
 1765 case Instruction::URem:
 1766 return selectBinaryOp(I, ISD::UREM);
 1767 case Instruction::FRem:
 1768 return selectBinaryOp(I, ISD::FREM);
 1769 case Instruction::Shl:
 1770 return selectBinaryOp(I, ISD::SHL);
 1771 case Instruction::LShr:
 1772 return selectBinaryOp(I, ISD::SRL);
 1773 case Instruction::AShr:
 1774 return selectBinaryOp(I, ISD::SRA);
 1775 case Instruction::And:
 1776 return selectBinaryOp(I, ISD::AND);
 1777 case Instruction::Or:
 1778 return selectBinaryOp(I, ISD::OR);
 1779 case Instruction::Xor:
 1780 return selectBinaryOp(I, ISD::XOR);
 1781
 1782 case Instruction::FNeg:
 1783 return selectFNeg(I, I->getOperand(0));
 1784
 1785 case Instruction::GetElementPtr:
 1786 return selectGetElementPtr(I);
 1787
 1788 case Instruction::Br: {
 1789 const BranchInst *BI = cast<BranchInst>(I);
 1790
 1791 if (BI->isUnconditional()) {
 1792 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
 1793 MachineBasicBlock *MSucc = FuncInfo.getMBB(LLVMSucc);
 1794 fastEmitBranch(MSucc, BI->getDebugLoc());
 1795 return true;
 1796 }
 1797
 1798 // Conditional branches are not handed yet.
 1799 // Halt "fast" selection and bail.
 1800 return false;
 1801 }
 1802
 1803 case Instruction::Unreachable: {
 1804 auto UI = cast<UnreachableInst>(I);
 1805 if (!UI->shouldLowerToTrap(TM.Options.TrapUnreachable,
 1807 return true;
 1808
 1809 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
 1810 }
 1811
 1812 case Instruction::Alloca:
 1813 // FunctionLowering has the static-sized case covered.
 1814 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
 1815 return true;
 1816
 1817 // Dynamic-sized alloca is not handled yet.
 1818 return false;
 1819
 1820 case Instruction::Call:
 1821 // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
 1822 // callee of the direct function call instruction will be mapped to the
 1823 // symbol for the function's entry point, which is distinct from the
 1824 // function descriptor symbol. The latter is the symbol whose XCOFF symbol
 1825 // name is the C-linkage name of the source level function.
 1826 // But fast isel still has the ability to do selection for intrinsics.
 1827 if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
 1828 return false;
 1829 return selectCall(I);
 1830
 1831 case Instruction::BitCast:
 1832 return selectBitCast(I);
 1833
 1834 case Instruction::FPToSI:
 1835 return selectCast(I, ISD::FP_TO_SINT);
 1836 case Instruction::ZExt:
 1837 return selectCast(I, ISD::ZERO_EXTEND);
 1838 case Instruction::SExt:
 1839 return selectCast(I, ISD::SIGN_EXTEND);
 1840 case Instruction::Trunc:
 1841 return selectCast(I, ISD::TRUNCATE);
 1842 case Instruction::SIToFP:
 1843 return selectCast(I, ISD::SINT_TO_FP);
 1844
 1845 case Instruction::IntToPtr: // Deliberate fall-through.
 1846 case Instruction::PtrToInt: {
// Pointer<->integer conversions are extensions, truncations, or no-ops,
// depending on the relative bit widths.
 1847 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
 1848 EVT DstVT = TLI.getValueType(DL, I->getType());
 1849 if (DstVT.bitsGT(SrcVT))
 1850 return selectCast(I, ISD::ZERO_EXTEND);
 1851 if (DstVT.bitsLT(SrcVT))
 1852 return selectCast(I, ISD::TRUNCATE);
 1853 Register Reg = getRegForValue(I->getOperand(0));
 1854 if (!Reg)
 1855 return false;
 1856 updateValueMap(I, Reg);
 1857 return true;
 1858 }
 1859
 1860 case Instruction::ExtractValue:
 1861 return selectExtractValue(I);
 1862
 1863 case Instruction::Freeze:
 1864 return selectFreeze(I);
 1865
 1866 case Instruction::PHI:
 1867 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
 1868
 1869 default:
 1870 // Unhandled instruction. Halt "fast" selection and bail.
 1871 return false;
 1872 }
 1873}
1874
// FastISel constructor: caches frequently used per-function objects
// (register info, frame info, constant pool, target machine, data layout,
// and the target's instruction/lowering/register info) from FunctionLoweringInfo.
// NOTE(review): the constructor's signature lines (original lines
// 1875-1877) and one trailing initializer line (1884) are missing from this
// extraction.
1878 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
 1879 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
 1880 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
 1881 TII(*MF->getSubtarget().getInstrInfo()),
 1882 TLI(*MF->getSubtarget().getTargetLowering()),
 1883 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
 1885
// Defaulted out-of-line destructor (anchors the vtable in this TU).
1886FastISel::~FastISel() = default;
1887
// Default target hook: targets override this to fast-lower formal arguments.
1888bool FastISel::fastLowerArguments() { return false; }
1889
// Default target hook: targets override this to fast-lower calls.
1890bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1891
1893 return false;
1894}
1895
1897
1899 return Register();
1900}
1901
// Default stub for the two-register fastEmit overload; returns an invalid
// Register. NOTE(review): the signature's first line (original line 1902)
// is missing from this extraction.
1903 Register /*Op1*/) {
 1904 return Register();
 1905}
1906
// Default stub returning an invalid Register.
// NOTE(review): the signature line (original line 1907) is missing from this
// extraction.
1908 return Register();
 1909}
1910
// Default stub for the FP-immediate fastEmit overload; returns an invalid
// Register. NOTE(review): the signature's first line (original line 1911)
// is missing from this extraction.
1912 const ConstantFP * /*FPImm*/) {
 1913 return Register();
 1914}
1915
// Default stub for the register+immediate fastEmit overload; returns an
// invalid Register. NOTE(review): the signature's first line (original line
// 1916) is missing from this extraction.
1917 uint64_t /*Imm*/) {
 1918 return Register();
 1919}
1920
1921/// This method is a wrapper of fastEmit_ri. It first tries to emit an
 1922/// instruction with an immediate operand using fastEmit_ri.
 1923/// If that fails, it materializes the immediate into a register and try
 1924/// fastEmit_rr instead.
// NOTE(review): the signature line (original line 1925) and the line
// constructing the IntegerType for the fallback constant (1952) are missing
// from this extraction.
 1926 uint64_t Imm, MVT ImmType) {
 1927 // If this is a multiply by a power of two, emit this as a shift left.
 1928 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
 1929 Opcode = ISD::SHL;
 1930 Imm = Log2_64(Imm);
 1931 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
 1932 // div x, 8 -> srl x, 3
 1933 Opcode = ISD::SRL;
 1934 Imm = Log2_64(Imm);
 1935 }
 1936
 1937 // Horrible hack (to be removed), check to make sure shift amounts are
 1938 // in-range.
 1939 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
 1940 Imm >= VT.getSizeInBits())
 1941 return Register();
 1942
 1943 // First check if immediate type is legal. If not, we can't use the ri form.
 1944 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
 1945 if (ResultReg)
 1946 return ResultReg;
 1947 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
 1948 if (!MaterialReg) {
 1949 // This is a bit ugly/slow, but failing here means falling out of
 1950 // fast-isel, which would be very slow.
 1951 IntegerType *ITy =
 1953 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
 1954 if (!MaterialReg)
 1955 return Register();
 1956 }
 1957 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
 1958}
1959
// Allocate a fresh virtual register of the given register class.
// NOTE(review): the signature line (original line 1960) is missing from this
// extraction.
1961 return MRI.createVirtualRegister(RC);
 1962}
1963
// Constrain a virtual register to the register class required by operand
// OpNum of the instruction described by II; if constraining in place is not
// legal, copy into a fresh register of the required class instead.
// NOTE(review): the signature's first line (original line 1964) and the
// BuildMI line emitting the COPY (1973) are missing from this extraction.
1965 unsigned OpNum) {
 1966 if (Op.isVirtual()) {
 1967 const TargetRegisterClass *RegClass =
 1968 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
 1969 if (!MRI.constrainRegClass(Op, RegClass)) {
 1970 // If it's not legal to COPY between the register classes, something
 1971 // has gone very wrong before we got here.
 1972 Register NewOp = createResultReg(RegClass);
 1974 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
 1975 return NewOp;
 1976 }
 1977 }
 1978 return Op;
 1979}
1980
1981Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1982 const TargetRegisterClass *RC) {
1983 Register ResultReg = createResultReg(RC);
1984 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1985
1986 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
1987 return ResultReg;
1988}
1989
// Emit a one-register-operand instruction. If the MCInstrDesc declares no
// explicit defs, the result is produced in an implicit def and copied into
// the result register.
// NOTE(review): the BuildMI line for the no-explicit-def branch (original
// line 2001) is missing from this extraction.
1990Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
 1991 const TargetRegisterClass *RC, Register Op0) {
 1992 const MCInstrDesc &II = TII.get(MachineInstOpcode);
 1993
 1994 Register ResultReg = createResultReg(RC);
 1995 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
 1996
 1997 if (II.getNumDefs() >= 1)
 1998 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
 1999 .addReg(Op0);
 2000 else {
 2002 .addReg(Op0);
 2003 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
 2004 ResultReg)
 2005 .addReg(II.implicit_defs()[0]);
 2006 }
 2007
 2008 return ResultReg;
 2009}
2010
// Emit a two-register-operand instruction, constraining both operands to the
// classes required by the instruction description; falls back to an
// implicit-def + COPY when the description has no explicit defs.
// NOTE(review): the BuildMI line for the no-explicit-def branch (original
// line 2025) is missing from this extraction.
2011Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 2012 const TargetRegisterClass *RC, Register Op0,
 2013 Register Op1) {
 2014 const MCInstrDesc &II = TII.get(MachineInstOpcode);
 2015
 2016 Register ResultReg = createResultReg(RC);
 2017 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
 2018 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
 2019
 2020 if (II.getNumDefs() >= 1)
 2021 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
 2022 .addReg(Op0)
 2023 .addReg(Op1);
 2024 else {
 2026 .addReg(Op0)
 2027 .addReg(Op1);
 2028 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
 2029 ResultReg)
 2030 .addReg(II.implicit_defs()[0]);
 2031 }
 2032 return ResultReg;
 2033}
2034
// Emit a three-register-operand instruction; same def-handling scheme as
// fastEmitInst_rr. NOTE(review): the BuildMI line for the no-explicit-def
// branch (original line 2051) is missing from this extraction.
2035Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
 2036 const TargetRegisterClass *RC, Register Op0,
 2037 Register Op1, Register Op2) {
 2038 const MCInstrDesc &II = TII.get(MachineInstOpcode);
 2039
 2040 Register ResultReg = createResultReg(RC);
 2041 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
 2042 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
 2043 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
 2044
 2045 if (II.getNumDefs() >= 1)
 2046 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
 2047 .addReg(Op0)
 2048 .addReg(Op1)
 2049 .addReg(Op2);
 2050 else {
 2052 .addReg(Op0)
 2053 .addReg(Op1)
 2054 .addReg(Op2);
 2055 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
 2056 ResultReg)
 2057 .addReg(II.implicit_defs()[0]);
 2058 }
 2059 return ResultReg;
 2060}
2061
2062Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2063 const TargetRegisterClass *RC, Register Op0,
2064 uint64_t Imm) {
2065 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2066
2067 Register ResultReg = createResultReg(RC);
2068 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2069
2070 if (II.getNumDefs() >= 1)
2071 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2072 .addReg(Op0)
2073 .addImm(Imm);
2074 else {
2076 .addReg(Op0)
2077 .addImm(Imm);
2078 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2079 ResultReg)
2080 .addReg(II.implicit_defs()[0]);
2081 }
2082 return ResultReg;
2083}
2084
2085Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2086 const TargetRegisterClass *RC, Register Op0,
2087 uint64_t Imm1, uint64_t Imm2) {
2088 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2089
2090 Register ResultReg = createResultReg(RC);
2091 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2092
2093 if (II.getNumDefs() >= 1)
2094 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2095 .addReg(Op0)
2096 .addImm(Imm1)
2097 .addImm(Imm2);
2098 else {
2100 .addReg(Op0)
2101 .addImm(Imm1)
2102 .addImm(Imm2);
2103 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2104 ResultReg)
2105 .addReg(II.implicit_defs()[0]);
2106 }
2107 return ResultReg;
2108}
2109
2110Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2111 const TargetRegisterClass *RC,
2112 const ConstantFP *FPImm) {
2113 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2114
2115 Register ResultReg = createResultReg(RC);
2116
2117 if (II.getNumDefs() >= 1)
2118 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2119 .addFPImm(FPImm);
2120 else {
2122 .addFPImm(FPImm);
2123 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2124 ResultReg)
2125 .addReg(II.implicit_defs()[0]);
2126 }
2127 return ResultReg;
2128}
2129
2130Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2131 const TargetRegisterClass *RC, Register Op0,
2132 Register Op1, uint64_t Imm) {
2133 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2134
2135 Register ResultReg = createResultReg(RC);
2136 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2137 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2138
2139 if (II.getNumDefs() >= 1)
2140 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2141 .addReg(Op0)
2142 .addReg(Op1)
2143 .addImm(Imm);
2144 else {
2146 .addReg(Op0)
2147 .addReg(Op1)
2148 .addImm(Imm);
2149 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2150 ResultReg)
2151 .addReg(II.implicit_defs()[0]);
2152 }
2153 return ResultReg;
2154}
2155
2156Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2157 const TargetRegisterClass *RC, uint64_t Imm) {
2158 Register ResultReg = createResultReg(RC);
2159 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2160
2161 if (II.getNumDefs() >= 1)
2162 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2163 .addImm(Imm);
2164 else {
2166 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2167 ResultReg)
2168 .addReg(II.implicit_defs()[0]);
2169 }
2170 return ResultReg;
2171}
2172
2174 uint32_t Idx) {
2175 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2176 assert(Op0.isVirtual() && "Cannot yet extract from physregs");
2177 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2179 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2180 ResultReg).addReg(Op0, 0, Idx);
2181 return ResultReg;
2182}
2183
2184/// Emit MachineInstrs to compute the value of Op with all but the least
2185/// significant bit set to zero.
2187 return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2188}
2189
2190/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2191/// Emit code to ensure constants are copied into registers when needed.
2192/// Remember the virtual registers that need to be added to the Machine PHI
2193/// nodes as input. We cannot just directly add them, because expansion
2194/// might result in multiple MBB's for one BB. As such, the start of the
2195/// BB might correspond to a different MBB than the end.
2196bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2199
2200 // Check successor nodes' PHI nodes that expect a constant to be available
2201 // from this block.
2202 for (const BasicBlock *SuccBB : successors(LLVMBB)) {
2203 if (!isa<PHINode>(SuccBB->begin()))
2204 continue;
2205 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
2206
2207 // If this terminator has multiple identical successors (common for
2208 // switches), only handle each succ once.
2209 if (!SuccsHandled.insert(SuccMBB).second)
2210 continue;
2211
2213
2214 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2215 // nodes and Machine PHI nodes, but the incoming operands have not been
2216 // emitted yet.
2217 for (const PHINode &PN : SuccBB->phis()) {
2218 // Ignore dead phi's.
2219 if (PN.use_empty())
2220 continue;
2221
2222 // Only handle legal types. Two interesting things to note here. First,
2223 // by bailing out early, we may leave behind some dead instructions,
2224 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2225 // own moves. Second, this check is necessary because FastISel doesn't
2226 // use CreateRegs to create registers, so it always creates
2227 // exactly one register for each non-void instruction.
2228 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2229 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2230 // Handle integer promotions, though, because they're common and easy.
2231 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2233 return false;
2234 }
2235 }
2236
2237 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2238
2239 // Set the DebugLoc for the copy. Use the location of the operand if
2240 // there is one; otherwise no location, flushLocalValueMap will fix it.
2241 MIMD = {};
2242 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2243 MIMD = MIMetadata(*Inst);
2244
2245 Register Reg = getRegForValue(PHIOp);
2246 if (!Reg) {
2248 return false;
2249 }
2250 FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg);
2251 MIMD = {};
2252 }
2253 }
2254
2255 return true;
2256}
2257
2258bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2259 assert(LI->hasOneUse() &&
2260 "tryToFoldLoad expected a LoadInst with a single use");
2261 // We know that the load has a single use, but don't know what it is. If it
2262 // isn't one of the folded instructions, then we can't succeed here. Handle
2263 // this by scanning the single-use users of the load until we get to FoldInst.
2264 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2265
2266 const Instruction *TheUser = LI->user_back();
2267 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2268 // Stay in the right block.
2269 TheUser->getParent() == FoldInst->getParent() &&
2270 --MaxUsers) { // Don't scan too far.
2271 // If there are multiple or no uses of this instruction, then bail out.
2272 if (!TheUser->hasOneUse())
2273 return false;
2274
2275 TheUser = TheUser->user_back();
2276 }
2277
2278 // If we didn't find the fold instruction, then we failed to collapse the
2279 // sequence.
2280 if (TheUser != FoldInst)
2281 return false;
2282
2283 // Don't try to fold volatile loads. Target has to deal with alignment
2284 // constraints.
2285 if (LI->isVolatile())
2286 return false;
2287
2288 // Figure out which vreg this is going into. If there is no assigned vreg yet
2289 // then there actually was no reference to it. Perhaps the load is referenced
2290 // by a dead instruction.
2291 Register LoadReg = getRegForValue(LI);
2292 if (!LoadReg)
2293 return false;
2294
2295 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2296 // may mean that the instruction got lowered to multiple MIs, or the use of
2297 // the loaded value ended up being multiple operands of the result.
2298 if (!MRI.hasOneUse(LoadReg))
2299 return false;
2300
2301 // If the register has fixups, there may be additional uses through a
2302 // different alias of the register.
2303 if (FuncInfo.RegsWithFixups.contains(LoadReg))
2304 return false;
2305
2307 MachineInstr *User = RI->getParent();
2308
2309 // Set the insertion point properly. Folding the load can cause generation of
2310 // other random instructions (like sign extends) for addressing modes; make
2311 // sure they get inserted in a logical place before the new instruction.
2313 FuncInfo.MBB = User->getParent();
2314
2315 // Ask the target to try folding the load.
2316 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2317}
2318
2320 // Must be an add.
2321 if (!isa<AddOperator>(Add))
2322 return false;
2323 // Type size needs to match.
2324 if (DL.getTypeSizeInBits(GEP->getType()) !=
2325 DL.getTypeSizeInBits(Add->getType()))
2326 return false;
2327 // Must be in the same basic block.
2328 if (isa<Instruction>(Add) &&
2329 FuncInfo.getMBB(cast<Instruction>(Add)->getParent()) != FuncInfo.MBB)
2330 return false;
2331 // Must have a constant operand.
2332 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2333}
2334
2337 const Value *Ptr;
2338 Type *ValTy;
2339 MaybeAlign Alignment;
2341 bool IsVolatile;
2342
2343 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2344 Alignment = LI->getAlign();
2345 IsVolatile = LI->isVolatile();
2347 Ptr = LI->getPointerOperand();
2348 ValTy = LI->getType();
2349 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2350 Alignment = SI->getAlign();
2351 IsVolatile = SI->isVolatile();
2353 Ptr = SI->getPointerOperand();
2354 ValTy = SI->getValueOperand()->getType();
2355 } else
2356 return nullptr;
2357
2358 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2359 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2360 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2361 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2362
2363 AAMDNodes AAInfo = I->getAAMetadata();
2364
2365 if (!Alignment) // Ensure that codegen never sees alignment 0.
2366 Alignment = DL.getABITypeAlign(ValTy);
2367
2368 unsigned Size = DL.getTypeStoreSize(ValTy);
2369
2370 if (IsVolatile)
2372 if (IsNonTemporal)
2374 if (IsDereferenceable)
2376 if (IsInvariant)
2378
2380 *Alignment, AAInfo, Ranges);
2381}
2382
2384 // If both operands are the same, then try to optimize or fold the cmp.
2385 CmpInst::Predicate Predicate = CI->getPredicate();
2386 if (CI->getOperand(0) != CI->getOperand(1))
2387 return Predicate;
2388
2389 switch (Predicate) {
2390 default: llvm_unreachable("Invalid predicate!");
2391 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2392 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2393 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2394 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2395 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2396 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2397 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2398 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2399 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2400 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2401 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2402 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2403 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2404 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2405 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2406 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2407
2408 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2409 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2410 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2411 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2412 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2413 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2414 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2415 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2416 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2417 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2418 }
2419
2420 return Predicate;
2421}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static Register findLocalRegDef(MachineInstr &MI)
Return the defined register if this instruction defines exactly one virtual register and uses no othe...
Definition: FastISel.cpp:161
static bool isRegUsedByPhiNodes(Register DefReg, FunctionLoweringInfo &FuncInfo)
Definition: FastISel.cpp:178
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
Definition: FastISel.cpp:942
This file defines the FastISel class.
Hexagon Common GEP
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
#define P(N)
static bool isCommutative(Instruction *I, Value *ValWithUses)
This file defines the SmallPtrSet class.
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
An arbitrary precision integer that knows its signedness.
Definition: APSInt.h:24
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
static LLVM_ABI AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
LLVM_ABI bool getValueAsBool() const
Return the attribute's value as a boolean.
Definition: Attributes.cpp:386
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
const Instruction & front() const
Definition: BasicBlock.h:482
const Instruction & back() const
Definition: BasicBlock.h:484
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1406
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1267
Value * getCalledOperand() const
Definition: InstrTypes.h:1340
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1273
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1205
unsigned arg_size() const
Definition: InstrTypes.h:1290
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
bool isMustTailCall() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:666
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:681
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:695
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:707
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:708
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:684
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:693
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:682
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:683
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:702
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:701
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:705
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:692
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:686
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:689
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:703
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:690
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:685
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:687
@ ICMP_EQ
equal
Definition: InstrTypes.h:699
@ ICMP_NE
not equal
Definition: InstrTypes.h:700
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:706
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:694
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:704
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:691
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:680
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:688
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:767
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:277
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:868
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
LLVM_ABI std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:708
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:850
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:842
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:674
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:468
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
A debug info location.
Definition: DebugLoc.h:124
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
MachineRegisterInfo & MRI
Definition: FastISel.h:205
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:214
const DataLayout & DL
Definition: FastISel.h:210
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:531
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:237
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:643
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:2062
bool selectExtractValue(const User *U)
Definition: FastISel.cpp:1702
DenseMap< const Value *, Register > LocalValueMap
Definition: FastISel.h:202
void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor,...
Definition: FastISel.cpp:1620
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:226
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:900
virtual Register fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, Register Op0)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1898
Register fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1981
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, Register Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
Definition: FastISel.cpp:2011
virtual Register fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, Register Op0, Register Op1)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1902
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering.
Definition: FastISel.cpp:1892
virtual bool lowerDbgDeclare(const Value *V, DIExpression *Expr, DILocalVariable *Var, const DebugLoc &DL)
Target-independent lowering of debug information.
Definition: FastISel.cpp:1318
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:233
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1110
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:436
virtual Register fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:473
Register fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, Register Op1, Register Op2)
Emit a MachineInstr with three register operands and a result register in the given register class.
Definition: FastISel.cpp:2035
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:964
virtual Register fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1907
virtual Register fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1911
void handleDbgInfo(const Instruction *II)
Target-independent lowering of non-instruction debug info associated with this instruction.
Definition: FastISel.cpp:1188
bool selectFreeze(const User *I)
Definition: FastISel.cpp:1496
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1377
Register getRegForGEPIndex(MVT PtrVT, const Value *Idx)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:384
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1436
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2258
Register getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value.
Definition: FastISel.cpp:239
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:410
virtual Register fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic.
Definition: FastISel.h:484
void startNewBlock()
Set the current block to which generated machine instructions will be appended.
Definition: FastISel.cpp:123
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2336
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:300
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:2156
Register fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:2085
MachineFrameInfo & MFI
Definition: FastISel.h:206
MachineFunction * MF
Definition: FastISel.h:204
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2319
virtual bool lowerDbgValue(const Value *V, DIExpression *Expr, DILocalVariable *Var, const DebugLoc &DL)
Target-independent lowering of debug information.
Definition: FastISel.cpp:1232
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:69
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1536
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1890
bool selectXRayTypedEvent(const CallInst *II)
Definition: FastISel.cpp:919
virtual Register fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:478
Register fastEmitZExtFromI1(MVT VT, Register Op0)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero.
Definition: FastISel.cpp:2186
Register createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1960
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1888
bool selectFNeg(const User *I, const Value *In)
Emit an FNeg operation.
Definition: FastISel.cpp:1660
const TargetInstrInfo & TII
Definition: FastISel.h:211
bool selectCall(const User *I)
Definition: FastISel.cpp:1151
Register lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:352
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2383
virtual Register fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1896
void finishBasicBlock()
Flush the local value map.
Definition: FastISel.cpp:136
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Definition: FastISel.cpp:1990
Register fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, Register Op1, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:2130
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:203
MachineConstantPool & MCP
Definition: FastISel.h:207
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr),...
Definition: FastISel.cpp:1743
bool SkipTargetIndependentISel
Definition: FastISel.h:215
Register fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:2110
Register constrainOperandRegClass(const MCInstrDesc &II, Register Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1964
Register fastEmitInst_extractsubreg(MVT RetVT, Register Op0, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2173
void updateValueMap(const Value *I, Register Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:363
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:444
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1875
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:753
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:401
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
const TargetLowering & TLI
Definition: FastISel.h:212
virtual Register fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, Register Op0, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1916
const TargetMachine & TM
Definition: FastISel.h:209
MIMetadata MIMD
Definition: FastISel.h:208
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block.
Definition: FastISel.h:221
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:138
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:430
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtain the branch weight and add TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1641
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1467
virtual ~FastISel()
Register fastEmit_ri_(MVT VT, unsigned Opcode, Register Op0, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1925
const TargetRegisterInfo & TRI
Definition: FastISel.h:213
TargetLoweringBase::ArgListEntry ArgListEntry
Definition: FastISel.h:68
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
SmallPtrSet< const DbgVariableRecord *, 8 > PreprocessedDVRDeclares
Collection of dbg_declare instructions handled after argument lowering and before ISel proper.
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseSet< Register > RegsWithFixups
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
Register InitializeRegForValue(const Value *V)
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
InsertPt - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
DenseMap< Register, Register > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
std::vector< std::pair< MachineInstr *, Register > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
MachineRegisterInfo * RegInfo
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
Class to represent function types.
Definition: DerivedTypes.h:105
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:762
arg_iterator arg_end()
Definition: Function.h:875
arg_iterator arg_begin()
Definition: Function.h:866
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:359
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:171
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:428
Class to represent integer types.
Definition: DerivedTypes.h:42
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
An instruction for reading from memory.
Definition: Instructions.h:180
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:209
Context object for machine code objects.
Definition: MCContext.h:83
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:203
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:199
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:238
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:64
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
Metadata node.
Definition: Metadata.h:1077
Set of metadata that should be preserved when using BuildMI().
const DebugLoc & getDL() const
Machine Value Type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
LLVM_ABI void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
MachineInstrBundleIterator< MachineInstr > iterator
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
Representation of each machine instruction.
Definition: MachineInstr.h:72
LLVM_ABI void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
reg_iterator reg_begin(Register RegNo) const
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
ArrayRef< std::pair< MCRegister, Register > > liveins() const
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:121
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:74
constexpr unsigned id() const
Definition: Register.h:95
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:657
Class to represent struct types.
Definition: DerivedTypes.h:218
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
unsigned getCallFrameDestroyOpcode() const
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
const Triple & getTargetTriple() const
TargetOptions Options
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:408
bool isOSAIX() const
Tests whether the OS is AIX.
Definition: Triple.h:757
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:995
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
Value * getOperand(unsigned i) const
Definition: User.h:232
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:439
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:194
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:169
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:174
const ParentTy * getParent() const
Definition: ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ConstantFP
Definition: ISDOpcodes.h:87
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:259
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:862
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:410
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:975
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:826
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:1002
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:756
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:832
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:908
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:730
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1318
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:838
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
Definition: ISDOpcodes.h:1572
Reg
All possible values of the reg field in the ModR/M byte.
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:148
LLVM_ABI const_iterator begin(StringRef path LLVM_LIFETIME_BOUND, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
LLVM_ABI reverse_iterator rend(StringRef path LLVM_LIFETIME_BOUND)
Get reverse end iterator over path.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:663
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
gep_type_iterator gep_type_end(const User *GEP)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:342
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
@ Add
Sum of integers.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:543
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:119
gep_type_iterator gep_type_begin(const User *GEP)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:33
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static constexpr roundingMode rmTowardZero
Definition: APFloat.h:308
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:279
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:295
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:299
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:95
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:94
SmallVector< Register, 16 > OutRegs
Definition: FastISel.h:96
CallLoweringInfo & setTailCall(bool Value=true)
Definition: FastISel.h:177
SmallVector< Register, 4 > InRegs
Definition: FastISel.h:98
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:182
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, const CallBase &Call)
Definition: FastISel.h:104
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:97
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117