LLVM 22.0.0git
MachineInstr.cpp
Go to the documentation of this file.
1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
38#include "llvm/IR/Constants.h"
40#include "llvm/IR/DebugLoc.h"
41#include "llvm/IR/Function.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/LLVMContext.h"
45#include "llvm/IR/Metadata.h"
46#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/MC/MCInstrDesc.h"
53#include "llvm/Support/Debug.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <cstring>
62#include <utility>
63
64using namespace llvm;
65
// Debugging aid: when set, MachineInstr dumps also print the instruction's
// heap address so distinct-but-identical MIs can be told apart.
66static cl::opt<bool>
67 PrintMIAddrs("print-mi-addrs", cl::Hidden,
68 cl::desc("Print addresses of MachineInstrs when dumping"));
69
// Walk MI -> parent MBB -> parent MF and return the MachineFunction, or null
// when MI is dangling (not inserted in a block, or its block not in a
// function).
// NOTE(review): the function header line (presumably
// `static const MachineFunction *getMFIfAvailable(const MachineInstr &MI)`)
// was lost in extraction — confirm against upstream.
71 if (const MachineBasicBlock *MBB = MI.getParent())
72 if (const MachineFunction *MF = MBB->getParent())
73 return MF;
74 return nullptr;
75}
76
77// Try to crawl up to the machine function and get TRI/MRI/TII from it.
// If MI is dangling the three out-parameters are left untouched, so callers
// should initialize them (typically to nullptr) before calling.
// NOTE(review): the first signature lines were lost in extraction; only two
// of the reference out-parameters are visible below.
79 const TargetRegisterInfo *&TRI,
81 const TargetInstrInfo *&TII) {
82
83 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
84 TRI = MF->getSubtarget().getRegisterInfo();
85 MRI = &MF->getRegInfo();
86 TII = MF->getSubtarget().getInstrInfo();
87 }
88}
89
// Append the implicit defs and then the implicit uses listed in the
// MCInstrDesc as register operands.
// CreateReg's second/third arguments here are (isDef, isImplicit).
// NOTE(review): the signature line (presumably
// `void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF)`) was
// lost in extraction.
91 for (MCPhysReg ImpDef : MCID->implicit_defs())
92 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
93 for (MCPhysReg ImpUse : MCID->implicit_uses())
94 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
95}
96
97/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
98/// implicit operands. It reserves space for the number of operands specified by
99/// the MCInstrDesc.
100MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
101 DebugLoc DL, bool NoImp)
102 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
103 DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
104 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
105
// Capacity covers explicit operands plus every implicit def/use the
// descriptor declares, so the common case needs exactly one allocation.
106 // Reserve space for the expected number of operands.
107 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
108 MCID->implicit_uses().size()) {
109 CapOperands = OperandCapacity::get(NumOps);
110 Operands = MF.allocateOperandArray(CapOperands);
111 }
112
// NOTE(review): the statement guarded by !NoImp (original line 114,
// presumably `addImplicitDefUseOperands(MF);`) was lost in extraction —
// confirm against upstream.
113 if (!NoImp)
115}
116
117/// MachineInstr ctor - Copies MachineInstr arg exactly.
118/// Does not copy the number from debug instruction numbering, to preserve
119/// uniqueness.
120MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
121 : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
122 Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
123 Opcode(MI.getOpcode()) {
124 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
125
126 CapOperands = OperandCapacity::get(MI.getNumOperands());
127 Operands = MF.allocateOperandArray(CapOperands);
128
129 // Copy operands.
130 for (const MachineOperand &MO : MI.operands())
131 addOperand(MF, MO);
132
133 // Replicate ties between the operands, which addOperand was not
134 // able to do reliably.
135 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
136 MachineOperand &NewMO = getOperand(i);
137 const MachineOperand &OrigMO = MI.getOperand(i);
138 NewMO.TiedTo = OrigMO.TiedTo;
139 }
140
141 // Copy all the sensible flags.
142 setFlags(MI.Flags);
143}
144
// Swap this instruction's descriptor (and cached opcode). If the instruction
// is inserted in a function, notify the MachineFunction first so it can
// update any bookkeeping keyed on the old descriptor.
// NOTE(review): the signature line (presumably
// `void MachineInstr::setDesc(const MCInstrDesc &TID)`) was lost in
// extraction.
146 if (getParent())
147 getMF()->handleChangeDesc(*this, TID);
148 MCID = &TID;
149 Opcode = TID.Opcode;
150}
151
152void MachineInstr::moveBefore(MachineInstr *MovePos) {
153 MovePos->getParent()->splice(MovePos, getParent(), getIterator());
154}
155
156/// getRegInfo - If this instruction is embedded into a MachineFunction,
157/// return the MachineRegisterInfo object for the current function, otherwise
158/// return null.
159MachineRegisterInfo *MachineInstr::getRegInfo() {
// NOTE(review): the guard line (original line 160, presumably
// `if (MachineBasicBlock *MBB = getParent())`) was lost in extraction —
// confirm against upstream; the const overload below shows the same shape.
161 return &MBB->getParent()->getRegInfo();
162 return nullptr;
163}
164
165const MachineRegisterInfo *MachineInstr::getRegInfo() const {
166 if (const MachineBasicBlock *MBB = getParent())
167 return &MBB->getParent()->getRegInfo();
168 return nullptr;
169}
170
171void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
172 for (MachineOperand &MO : operands())
173 if (MO.isReg())
174 MRI.removeRegOperandFromUseList(&MO);
175}
176
177void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
178 for (MachineOperand &MO : operands())
179 if (MO.isReg())
180 MRI.addRegOperandToUseList(&MO);
181}
182
// Convenience overload: look up the owning MachineFunction and forward to
// addOperand(MF, Op). Only valid for instructions already inserted in a
// block.
// NOTE(review): the signature and the MBB declaration (original lines
// 183-184, presumably `void MachineInstr::addOperand(const MachineOperand
// &Op) {` and `MachineBasicBlock *MBB = getParent();`) were lost in
// extraction.
185 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
186 MachineFunction *MF = MBB->getParent();
187 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
188 addOperand(*MF, Op);
189}
190
191/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
192/// ranges. If MRI is non-null also update use-def chains.
// NOTE(review): the signature line (original line 193, presumably
// `static void moveOperands(MachineOperand *Dst, MachineOperand *Src,`) was
// lost in extraction.
194 unsigned NumOps, MachineRegisterInfo *MRI) {
195 if (MRI)
196 return MRI->moveOperands(Dst, Src, NumOps);
// Without MRI there are no use-list pointers to fix up, so a raw byte move
// suffices; memmove (not memcpy) because the ranges may overlap.
197 // MachineOperand is a trivially copyable type so we can just use memmove.
198 assert(Dst && Src && "Unknown operands");
199 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
200}
201
202/// addOperand - Add the specified operand to the instruction. If it is an
203/// implicit operand, it is added to the end of the operand list. If it is
204/// an explicit operand it is added at the end of the explicit operand list
205/// (before the first implicit operand).
// NOTE(review): the signature line (original line 206, presumably
// `void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand
// &Op) {`) was lost in extraction.
207 assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
208 "Cannot add more operands.");
209 assert(MCID && "Cannot add operands before providing an instr descriptor");
210
211 // Check if we're adding one of our existing operands.
212 if (&Op >= Operands && &Op < Operands + NumOperands) {
213 // This is unusual: MI->addOperand(MI->getOperand(i)).
214 // If adding Op requires reallocating or moving existing operands around,
215 // the Op reference could go stale. Support it by copying Op.
216 MachineOperand CopyOp(Op);
217 return addOperand(MF, CopyOp);
218 }
219
220 // Find the insert location for the new operand. Implicit registers go at
221 // the end, everything else goes before the implicit regs.
222 //
223 // FIXME: Allow mixed explicit and implicit operands on inline asm.
224 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
225 // implicit-defs, but they must not be moved around. See the FIXME in
226 // InstrEmitter.cpp.
// Scan backwards over trailing implicit register operands to find where an
// explicit (or non-register) operand must be inserted.
227 unsigned OpNo = getNumOperands();
228 bool isImpReg = Op.isReg() && Op.isImplicit();
229 if (!isImpReg && !isInlineAsm()) {
230 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
231 --OpNo;
232 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
233 }
234 }
235
236 // OpNo now points as the desired insertion point. Unless this is a variadic
237 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
238 // RegMask operands go between the explicit and implicit operands.
239 MachineRegisterInfo *MRI = getRegInfo();
240
241 // Determine if the Operands array needs to be reallocated.
242 // Save the old capacity and operand array.
243 OperandCapacity OldCap = CapOperands;
244 MachineOperand *OldOperands = Operands;
245 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
246 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
247 Operands = MF.allocateOperandArray(CapOperands);
248 // Move the operands before the insertion point.
249 if (OpNo)
250 moveOperands(Operands, OldOperands, OpNo, MRI);
251 }
252
// This also handles the no-reallocation case: Operands == OldOperands and
// the tail is shifted one slot right (moveOperands tolerates overlap).
253 // Move the operands following the insertion point.
254 if (OpNo != NumOperands)
255 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
256 MRI);
257 ++NumOperands;
258
259 // Deallocate the old operand array.
260 if (OldOperands != Operands && OldOperands)
261 MF.deallocateOperandArray(OldCap, OldOperands);
262
263 // Copy Op into place. It still needs to be inserted into the MRI use lists.
264 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
265 NewMO->ParentMI = this;
266
267 // When adding a register operand, tell MRI about it.
268 if (NewMO->isReg()) {
269 // Ensure isOnRegUseList() returns false, regardless of Op's status.
270 NewMO->Contents.Reg.Prev = nullptr;
271 // Ignore existing ties. This is not a property that can be copied.
272 NewMO->TiedTo = 0;
273 // Add the new operand to MRI, but only for instructions in an MBB.
274 if (MRI)
275 MRI->addRegOperandToUseList(NewMO);
276 // The MCID operand information isn't accurate until we start adding
277 // explicit operands. The implicit operands are added first, then the
278 // explicits are inserted before them.
279 if (!isImpReg) {
280 // Tie uses to defs as indicated in MCInstrDesc.
281 if (NewMO->isUse()) {
282 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
283 if (DefIdx != -1)
284 tieOperands(DefIdx, OpNo);
285 }
286 // If the register operand is flagged as early, mark the operand as such.
287 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
288 NewMO->setIsEarlyClobber(true);
289 }
290 // Ensure debug instructions set debug flag on register uses.
291 if (NewMO->isUse() && isDebugInstr())
292 NewMO->setIsDebug();
293 }
294}
295
// Remove the operand at index OpNo, shifting any following operands down one
// slot. The operand must not be tied (it is untied first), and none of the
// operands after it may be tied either, since shifting them would break the
// tie indices.
296void MachineInstr::removeOperand(unsigned OpNo) {
297 assert(OpNo < getNumOperands() && "Invalid operand number");
298 untieRegOperand(OpNo);
300#ifndef NDEBUG
301 // Moving tied operands would break the ties.
302 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
303 if (Operands[i].isReg())
304 assert(!Operands[i].isTied() && "Cannot move tied operands");
305#endif
306
// Unlink the dying register operand from MRI's use list before its storage
// is overwritten below.
307 MachineRegisterInfo *MRI = getRegInfo();
308 if (MRI && Operands[OpNo].isReg())
309 MRI->removeRegOperandFromUseList(Operands + OpNo);
310
311 // Don't call the MachineOperand destructor. A lot of this code depends on
312 // MachineOperand having a trivial destructor anyway, and adding a call here
313 // wouldn't make it 'destructor-correct'.
314
315 if (unsigned N = NumOperands - 1 - OpNo)
316 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
317 --NumOperands;
318}
319
// Rebuild this instruction's "extra info" (memory operands, pre/post symbols,
// heap-alloc marker, PC sections, CFI type, MMRA metadata): cleared when
// empty, stored inline when it fits in a single pointer, otherwise allocated
// out of line in the MachineFunction.
// NOTE(review): the second signature line (original line 321, presumably
// `ArrayRef<MachineMemOperand *> MMOs,`) was lost in extraction.
320void MachineInstr::setExtraInfo(MachineFunction &MF,
322 MCSymbol *PreInstrSymbol,
323 MCSymbol *PostInstrSymbol,
324 MDNode *HeapAllocMarker, MDNode *PCSections,
325 uint32_t CFIType, MDNode *MMRAs) {
326 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
327 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
328 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
329 bool HasPCSections = PCSections != nullptr;
330 bool HasCFIType = CFIType != 0;
331 bool HasMMRAs = MMRAs != nullptr;
332 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
333 HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;
334
335 // Drop all extra info if there is none.
336 if (NumPointers <= 0) {
337 Info.clear();
338 return;
339 }
340
341 // If more than one pointer, then store out of line. Store heap alloc markers
342 // out of line because PointerSumType cannot hold more than 4 tag types with
343 // 32-bit pointers.
344 // FIXME: Maybe we should make the symbols in the extra info mutable?
345 else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
346 HasCFIType) {
347 Info.set<EIIK_OutOfLine>(
348 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
349 HeapAllocMarker, PCSections, CFIType, MMRAs));
350 return;
351 }
352
// Exactly one pointer remains here, and it is a symbol or a single MMO.
353 // Otherwise store the single pointer inline.
354 if (HasPreInstrSymbol)
355 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
356 else if (HasPostInstrSymbol)
357 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
358 else
359 Info.set<EIIK_MMO>(MMOs[0]);
360}
361
// Drop all memory operands while preserving the other extra info (symbols,
// markers, etc.) via setExtraInfo with an empty MMO list.
// NOTE(review): the signature (original line 362, presumably
// `void MachineInstr::dropMemRefs(MachineFunction &MF) {`) and the trailing
// arguments of the setExtraInfo call (lines 367-368) were lost in extraction.
363 if (memoperands_empty())
364 return;
365
366 setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
369}
370
// Replace this instruction's memory operands with MMOs, keeping every other
// piece of extra info intact. An empty list degenerates to dropMemRefs.
// NOTE(review): the signature (original lines 371-372, presumably
// `void MachineInstr::setMemRefs(MachineFunction &MF,
// ArrayRef<MachineMemOperand *> MMOs) {`) and the trailing arguments of the
// setExtraInfo call (lines 379-380) were lost in extraction.
373 if (MMOs.empty()) {
374 dropMemRefs(MF);
375 return;
376 }
377
378 setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
381}
382
390
391void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
392 if (this == &MI)
393 // Nothing to do for a self-clone!
394 return;
395
396 assert(&MF == MI.getMF() &&
397 "Invalid machine functions when cloning memory refrences!");
398 // See if we can just steal the extra info already allocated for the
399 // instruction. We can do this whenever the pre- and post-instruction symbols
400 // are the same (including null).
401 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
402 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
403 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
404 getPCSections() == MI.getPCSections() && getMMRAMetadata() &&
405 MI.getMMRAMetadata()) {
406 Info = MI.Info;
407 return;
408 }
409
410 // Otherwise, fall back on a copy-based clone.
411 setMemRefs(MF, MI.memoperands());
412}
413
414/// Check to see if the MMOs pointed to by the two MemRefs arrays are
415/// identical.
// NOTE(review): the signature (original lines 416-417, presumably
// `static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
// ArrayRef<MachineMemOperand *> RHS) {`) was lost in extraction.
418 if (LHS.size() != RHS.size())
419 return false;
420
// Compare the pointed-to MachineMemOperands element-wise (operator==), not
// the pointers themselves.
421 auto LHSPointees = make_pointee_range(LHS);
422 auto RHSPointees = make_pointee_range(RHS);
423 return std::equal(LHSPointees.begin(), LHSPointees.end(),
424 RHSPointees.begin());
425}
426
// Merge the memory references of several instructions onto this one.
// NOTE(review): the signature (original lines 427-428, presumably
// `void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
// ArrayRef<const MachineInstr *> MIs) {`) and the declaration of the
// MergedMMOs buffer (line 447, presumably a
// `SmallVector<MachineMemOperand *, ...>`) were lost in extraction.
429 // Try handling easy numbers of MIs with simpler mechanisms.
430 if (MIs.empty()) {
431 dropMemRefs(MF);
432 return;
433 }
434 if (MIs.size() == 1) {
435 cloneMemRefs(MF, *MIs[0]);
436 return;
437 }
438 // Because an empty memoperands list provides *no* information and must be
439 // handled conservatively (assuming the instruction can do anything), the only
440 // way to merge with it is to drop all other memoperands.
441 if (MIs[0]->memoperands_empty()) {
442 dropMemRefs(MF);
443 return;
444 }
445
446 // Handle the general case.
448 // Start with the first instruction.
449 assert(&MF == MIs[0]->getMF() &&
450 "Invalid machine functions when cloning memory references!");
451 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
452 // Now walk all the other instructions and accumulate any different MMOs.
453 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
454 assert(&MF == MI.getMF() &&
455 "Invalid machine functions when cloning memory references!");
456
457 // Skip MIs with identical operands to the first. This is a somewhat
458 // arbitrary hack but will catch common cases without being quadratic.
459 // TODO: We could fully implement merge semantics here if needed.
460 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
461 continue;
462
463 // Because an empty memoperands list provides *no* information and must be
464 // handled conservatively (assuming the instruction can do anything), the
465 // only way to merge with it is to drop all other memoperands.
466 if (MI.memoperands_empty()) {
467 dropMemRefs(MF);
468 return;
469 }
470
471 // Otherwise accumulate these into our temporary buffer of the merged state.
472 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
473 }
474
475 setMemRefs(MF, MergedMMOs);
476}
477
// Set (or clear, when Symbol is null) the pre-instruction MCSymbol, keeping
// the rest of the extra info intact.
// NOTE(review): the signature (original line 478, presumably
// `void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol
// *Symbol) {`) and the trailing setExtraInfo arguments (lines 490-491) were
// lost in extraction.
479 // Do nothing if old and new symbols are the same.
480 if (Symbol == getPreInstrSymbol())
481 return;
482
483 // If there was only one symbol and we're removing it, just clear info.
484 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
485 Info.clear();
486 return;
487 }
488
489 setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
492}
493
// Set (or clear, when Symbol is null) the post-instruction MCSymbol, keeping
// the rest of the extra info intact. Mirrors setPreInstrSymbol above.
// NOTE(review): the signature (original line 494) and the trailing
// setExtraInfo arguments (lines 506-507) were lost in extraction.
495 // Do nothing if old and new symbols are the same.
496 if (Symbol == getPostInstrSymbol())
497 return;
498
499 // If there was only one symbol and we're removing it, just clear info.
500 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
501 Info.clear();
502 return;
503 }
504
505 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
508}
509
// Set the heap-alloc marker metadata node, rebuilding the extra info with
// everything else unchanged.
// NOTE(review): the signature (original line 510) and the trailing
// setExtraInfo argument line (516) were lost in extraction.
511 // Do nothing if old and new symbols are the same.
512 if (Marker == getHeapAllocMarker())
513 return;
514
515 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
517}
518
// Set the !pcsections metadata node, rebuilding the extra info with
// everything else unchanged.
// NOTE(review): the signature (original line 519) and the final setExtraInfo
// argument line (526) were lost in extraction.
520 // Do nothing if old and new symbols are the same.
521 if (PCSections == getPCSections())
522 return;
523
524 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
525 getHeapAllocMarker(), PCSections, getCFIType(),
527}
528
// Set the CFI type id (0 means "none"), rebuilding the extra info with
// everything else unchanged.
// NOTE(review): the signature (original line 529) and the trailing
// setExtraInfo argument line (535) were lost in extraction.
530 // Do nothing if old and new types are the same.
531 if (Type == getCFIType())
532 return;
533
534 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
536}
537
// Set the MMRA (memory model relaxation annotations) metadata node,
// rebuilding the extra info with everything else unchanged.
// NOTE(review): the signature (original line 538) and the trailing
// setExtraInfo argument line (544) were lost in extraction.
539 // Do nothing if old and new symbols are the same.
540 if (MMRAs == getMMRAMetadata())
541 return;
542
543 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
545}
546
// Copy every symbol/metadata slot of the extra info (but not memory
// operands) from MI onto this instruction via the individual setters.
// NOTE(review): the first signature line (original line 547, presumably
// `void MachineInstr::cloneInstrSymbols(MachineFunction &MF,`) was lost in
// extraction.
548 const MachineInstr &MI) {
549 if (this == &MI)
550 // Nothing to do for a self-clone!
551 return;
552
553 assert(&MF == MI.getMF() &&
554 "Invalid machine functions when cloning instruction symbols!");
555
556 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
557 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
558 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
559 setPCSections(MF, MI.getPCSections());
560 setMMRAMetadata(MF, MI.getMMRAMetadata());
561}
562
563uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
564 // For now, the just return the union of the flags. If the flags get more
565 // complicated over time, we might need more logic here.
566 return getFlags() | Other.getFlags();
567}
568
// Translate IR-level flags on Instruction I (nsw/nuw, inbounds, nneg,
// disjoint, samesign, exact, fast-math flags, !unpredictable) into the
// corresponding MachineInstr::MIFlag bits and return the accumulated mask.
// NOTE(review): the extraction dropped the function signature (original line
// 569) and every `MIFlags |= MachineInstr::...` assignment line, leaving
// only the condition lines below — confirm the flag mappings against
// upstream.
570 uint32_t MIFlags = 0;
571 // Copy the wrapping flags.
572 if (const OverflowingBinaryOperator *OB =
574 if (OB->hasNoSignedWrap())
576 if (OB->hasNoUnsignedWrap())
578 } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
579 if (TI->hasNoSignedWrap())
581 if (TI->hasNoUnsignedWrap())
583 } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
584 if (GEP->hasNoUnsignedSignedWrap())
586 if (GEP->hasNoUnsignedWrap())
588 if (GEP->isInBounds())
590 }
591
592 // Copy the nonneg flag.
594 if (PNI->hasNonNeg())
596 // Copy the disjoint flag.
597 } else if (const PossiblyDisjointInst *PD =
599 if (PD->isDisjoint())
601 }
602
603 // Copy the samesign flag.
604 if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
605 if (ICmp->hasSameSign())
607
608 // Copy the exact flag.
610 if (PE->isExact())
612
613 // Copy the fast-math flags.
615 const FastMathFlags Flags = FP->getFastMathFlags();
616 if (Flags.noNaNs())
618 if (Flags.noInfs())
620 if (Flags.noSignedZeros())
622 if (Flags.allowReciprocal())
624 if (Flags.allowContract())
626 if (Flags.approxFunc())
628 if (Flags.allowReassoc())
630 }
631
632 if (I.getMetadata(LLVMContext::MD_unpredictable))
634
635 return MIFlags;
636}
637
641
// Scan all instructions of the bundle headed by this instruction and decide
// whether the MCID flag bits in Mask hold for any (AnyInBundle) or all
// (AllInBundle) of them.
// NOTE(review): the loop header iterating over the bundle (original line
// 644, presumably declaring the const_instr_iterator MII) was lost in
// extraction.
642bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
643 assert(!isBundledWithPred() && "Must be called on bundle header");
645 if (MII->getDesc().getFlags() & Mask) {
646 if (Type == AnyInBundle)
647 return true;
648 } else {
// The BUNDLE header pseudo itself is exempt from the AllInBundle check.
649 if (Type == AllInBundle && !MII->isBundle())
650 return false;
651 }
652 // This was the last instruction in the bundle.
653 if (!MII->isBundledWithSucc())
654 return Type == AllInBundle;
655 }
656}
657
// Structural equality of two machine instructions: same opcode, operand
// count, operands (with def handling controlled by Check), debug location
// (for debug instrs), pre/post symbols, and CFI type for calls. Bundles are
// compared member-wise.
658bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
659 MICheckType Check) const {
660 // If opcodes or number of operands are not the same then the two
661 // instructions are obviously not identical.
662 if (Other.getOpcode() != getOpcode() ||
663 Other.getNumOperands() != getNumOperands())
664 return false;
665
666 if (isBundle()) {
667 // We have passed the test above that both instructions have the same
668 // opcode, so we know that both instructions are bundles here. Let's compare
669 // MIs inside the bundle.
670 assert(Other.isBundle() && "Expected that both instructions are bundles.")
// NOTE(review): the declarations of the bundle iterators I1/I2 (original
// lines 671-672) were lost in extraction — confirm against upstream.
673 // Loop until we analysed the last intruction inside at least one of the
674 // bundles.
675 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
676 ++I1;
677 ++I2;
678 if (!I1->isIdenticalTo(*I2, Check))
679 return false;
680 }
681 // If we've reached the end of just one of the two bundles, but not both,
682 // the instructions are not identical.
683 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
684 return false;
685 }
686
687 // Check operands to make sure they match.
688 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
689 const MachineOperand &MO = getOperand(i);
690 const MachineOperand &OMO = Other.getOperand(i);
691 if (!MO.isReg()) {
692 if (!MO.isIdenticalTo(OMO))
693 return false;
694 continue;
695 }
696
697 // Clients may or may not want to ignore defs when testing for equality.
698 // For example, machine CSE pass only cares about finding common
699 // subexpressions, so it's safe to ignore virtual register defs.
700 if (MO.isDef()) {
701 if (Check == IgnoreDefs)
702 continue;
703 else if (Check == IgnoreVRegDefs) {
// Only pairs where *both* defs are virtual are ignored; otherwise compare.
704 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
705 if (!MO.isIdenticalTo(OMO))
706 return false;
707 } else {
708 if (!MO.isIdenticalTo(OMO))
709 return false;
710 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
711 return false;
712 }
713 } else {
714 if (!MO.isIdenticalTo(OMO))
715 return false;
716 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
717 return false;
718 }
719 }
// Unknown (null) debug locations compare equal to anything here.
720 // If DebugLoc does not match then two debug instructions are not identical.
721 if (isDebugInstr())
722 if (getDebugLoc() && Other.getDebugLoc() &&
723 getDebugLoc() != Other.getDebugLoc())
724 return false;
725 // If pre- or post-instruction symbols do not match then the two instructions
726 // are not identical.
727 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
728 getPostInstrSymbol() != Other.getPostInstrSymbol())
729 return false;
730 // Call instructions with different CFI types are not identical.
731 if (isCall() && getCFIType() != Other.getCFIType())
732 return false;
733
734 return true;
735}
736
// Equivalence for DBG_VALUE-like instructions: same debug location, variable,
// and debug operand list; the final (dropped) check compares debug
// expressions/indirectness.
// NOTE(review): the call opening the expression comparison (original lines
// 749-750, presumably `if (!DIExpression::isEqualExpression(...)`) was lost
// in extraction; only its continuation lines remain below.
737bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
738 if (!isDebugValueLike() || !Other.isDebugValueLike())
739 return false;
740 if (getDebugLoc() != Other.getDebugLoc())
741 return false;
742 if (getDebugVariable() != Other.getDebugVariable())
743 return false;
744 if (getNumDebugOperands() != Other.getNumDebugOperands())
745 return false;
746 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
747 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
748 return false;
751 Other.getDebugExpression(), Other.isIndirectDebugValue()))
752 return false;
753 return true;
754}
755
// Return the enclosing MachineFunction (MI -> MBB -> MF). Requires the
// instruction to be inserted in a block.
// NOTE(review): the signature line (original line 756) was lost in
// extraction.
757 return getParent()->getParent();
758}
759
// Unlink this instruction from its basic block without deleting it; returns
// the removed instruction (per MachineBasicBlock::remove).
// NOTE(review): the signature line (original line 760) was lost in
// extraction.
761 assert(getParent() && "Not embedded in a basic block!");
762 return getParent()->remove(this);
763}
764
// Bundle-aware unlink: remove via the instr (bundle-level) list without
// deleting. NOTE(review): the signature line (original line 765) was lost in
// extraction.
766 assert(getParent() && "Not embedded in a basic block!");
767 return getParent()->remove_instr(this);
768}
769
// Unlink this instruction from its block and delete it.
// NOTE(review): the signature line (original line 770) was lost in
// extraction.
771 assert(getParent() && "Not embedded in a basic block!");
772 getParent()->erase(this);
773}
774
// Bundle-aware erase: unlink via the instr (bundle-level) list and delete.
// NOTE(review): the signature line (original line 775) was lost in
// extraction.
776 assert(getParent() && "Not embedded in a basic block!");
777 getParent()->erase_instr(this);
778}
779
// True for call instructions that can carry additional call info, excluding
// the pseudo call-like opcodes listed in the switch.
// NOTE(review): the signature line (original line 780) was lost in
// extraction.
781 if (!isCall(Type))
782 return false;
783 switch (getOpcode()) {
784 case TargetOpcode::PATCHPOINT:
785 case TargetOpcode::STACKMAP:
786 case TargetOpcode::STATEPOINT:
787 case TargetOpcode::FENTRY_CALL:
788 return false;
789 }
790 return true;
791}
792
798
// Helper shared by the const and non-const getDebugOperandsForReg wrappers:
// lazily filter MI's debug operands down to those that are register operands
// equal to Reg.
// NOTE(review): the line naming the function and its parameters (original
// line 802) was lost in extraction.
799template <typename Operand, typename Instruction>
800static iterator_range<
801 filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
803 std::function<bool(Operand & Op)> OpUsesReg(
804 [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
805 return make_filter_range(MI->debug_operands(), OpUsesReg);
806}
807
809 std::function<bool(const MachineOperand &Op)>>>
814
820
// Count the explicit operands: the descriptor's fixed count, plus — for
// variadic instructions — any trailing operands up to the first implicit
// register operand.
// NOTE(review): the signature line (original line 821) was lost in
// extraction.
822 unsigned NumOperands = MCID->getNumOperands();
823 if (!MCID->isVariadic())
824 return NumOperands;
825
826 for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
827 // The operands must always be in the following order:
828 // - explicit reg defs,
829 // - other explicit operands (reg uses, immediates, etc.),
830 // - implicit reg defs
831 // - implicit reg uses
832 if (MO.isReg() && MO.isImplicit())
833 break;
834 ++NumOperands;
835 }
836 return NumOperands;
837}
838
// Count the explicit defs: the descriptor's fixed def count, plus — for
// variadic instructions — any additional leading explicit register defs.
// NOTE(review): the signature line (original line 839) was lost in
// extraction.
840 unsigned NumDefs = MCID->getNumDefs();
841 if (!MCID->isVariadic())
842 return NumDefs;
843
844 for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
845 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
846 break;
847 ++NumDefs;
848 }
849 return NumDefs;
850}
851
// Mark this instruction as bundled with its predecessor and set the matching
// BundledSucc flag on that predecessor so the pairing stays consistent.
// NOTE(review): the signature (original line 852) plus the lines setting
// this instruction's BundledPred flag and declaring the Pred iterator
// (854-855) were lost in extraction — confirm against upstream.
853 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
856 --Pred;
857 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
858 Pred->setFlag(BundledSucc);
859}
860
// Mark this instruction as bundled with its successor and set the matching
// BundledPred flag on that successor.
// NOTE(review): the signature (original line 861) plus the lines setting
// this instruction's BundledSucc flag and declaring the Succ iterator
// (863-864) were lost in extraction.
862 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
865 ++Succ;
866 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
867 Succ->setFlag(BundledPred);
868}
869
// Break the bundle link to the predecessor, clearing the flags on both
// sides. NOTE(review): the signature (original line 870) plus the lines
// clearing this instruction's BundledPred flag and declaring the Pred
// iterator (872-873) were lost in extraction.
871 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
874 --Pred;
875 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
876 Pred->clearFlag(BundledSucc);
877}
878
// Break the bundle link to the successor, clearing the flags on both sides.
// NOTE(review): the signature (original line 879) plus the lines clearing
// this instruction's BundledSucc flag and declaring the Succ iterator
// (881-882) were lost in extraction.
880 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
883 ++Succ;
884 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
885 Succ->clearFlag(BundledPred);
886}
887
// True only for inline-asm instructions whose ExtraInfo immediate has the
// Extra_IsAlignStack bit set.
// NOTE(review): the signature line (original line 888) was lost in
// extraction.
889 if (isInlineAsm()) {
890 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
891 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
892 return true;
893 }
894 return false;
895}
896
// Decode the assembly dialect (AT&T vs Intel) from the inline-asm ExtraInfo
// immediate. NOTE(review): the signature line (original line 897) was lost
// in extraction.
898 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
899 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
900 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
901}
902
// For an inline-asm instruction, walk the operand-group flag words and
// return the index of the flag operand whose group contains OpIdx (storing
// the group number in *GroupNo when requested), or -1 when OpIdx is one of
// the fixed leading operands or among trailing implicit registers.
// NOTE(review): the signature (original line 903, presumably
// `int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,`) and the guard
// for the initial operands (line 909, presumably
// `if (OpIdx < InlineAsm::MIOp_FirstOperand)`) were lost in extraction.
904 unsigned *GroupNo) const {
905 assert(isInlineAsm() && "Expected an inline asm instruction");
906 assert(OpIdx < getNumOperands() && "OpIdx out of range");
907
908 // Ignore queries about the initial operands.
910 return -1;
911
912 unsigned Group = 0;
913 unsigned NumOps;
914 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
915 i += NumOps) {
916 const MachineOperand &FlagMO = getOperand(i);
917 // If we reach the implicit register operands, stop looking.
918 if (!FlagMO.isImm())
919 return -1;
920 const InlineAsm::Flag F(FlagMO.getImm());
// Each group is one flag word followed by its register operands.
921 NumOps = 1 + F.getNumOperandRegisters();
922 if (i + NumOps > OpIdx) {
923 if (GroupNo)
924 *GroupNo = Group;
925 return i;
926 }
927 ++Group;
928 }
929 return -1;
930}
931
// For DBG_LABEL, the label metadata is operand 0.
// NOTE(review): the signature line (original line 932) was lost in
// extraction.
933 assert(isDebugLabel() && "not a DBG_LABEL");
934 return cast<DILabel>(getOperand(0).getMetadata());
935}
936
// Const accessor for the variable operand of a DBG_VALUE-like instruction:
// operand 2 for DBG_VALUE, operand 0 for list forms.
// NOTE(review): the signature line (original line 937) was lost in
// extraction.
938 assert((isDebugValueLike()) && "not a DBG_VALUE*");
939 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
940 return getOperand(VariableOp);
941}
942
// Non-const twin of the accessor above (same operand-index rule).
// NOTE(review): the signature line (original line 943) was lost in
// extraction.
944 assert((isDebugValueLike()) && "not a DBG_VALUE*");
945 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
946 return getOperand(VariableOp);
947}
948
952
// Const accessor for the DIExpression operand of a DBG_VALUE-like
// instruction: operand 3 for DBG_VALUE, operand 1 for list forms.
// NOTE(review): the signature line (original line 953) was lost in
// extraction.
954 assert((isDebugValueLike()) && "not a DBG_VALUE*");
955 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
956 return getOperand(ExpressionOp);
957}
958
// Non-const twin of the accessor above (same operand-index rule).
// NOTE(review): the signature line (original line 959) was lost in
// extraction.
960 assert((isDebugValueLike()) && "not a DBG_VALUE*");
961 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
962 return getOperand(ExpressionOp);
963}
964
968
972
// Determine the register-class constraint of operand OpIdx: from the
// MCInstrDesc for normal instructions, or decoded from the inline-asm flag
// word (following ties use->def) for inline asm. Returns null when there is
// no class constraint.
// NOTE(review): the first signature lines (original lines 973-974,
// presumably `const TargetRegisterClass *MachineInstr::getRegClassConstraint(
// unsigned OpIdx,`) were lost in extraction.
975 const TargetInstrInfo *TII,
976 const TargetRegisterInfo *TRI) const {
977 assert(getParent() && "Can't have an MBB reference here!");
978 assert(getMF() && "Can't have an MF reference here!");
979 // Most opcodes have fixed constraints in their MCInstrDesc.
980 if (!isInlineAsm())
981 return TII->getRegClass(getDesc(), OpIdx, TRI);
982
983 if (!getOperand(OpIdx).isReg())
984 return nullptr;
985
986 // For tied uses on inline asm, get the constraint from the def.
987 unsigned DefIdx;
988 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
989 OpIdx = DefIdx;
990
991 // Inline asm stores register class constraints in the flag word.
992 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
993 if (FlagIdx < 0)
994 return nullptr;
995
996 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
997 unsigned RCID;
998 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
999 F.hasRegClassConstraint(RCID))
1000 return TRI->getRegClass(RCID);
1001
1002 // Assume that all registers in a memory operand are pointers.
1003 if (F.isMemKind())
1004 return TRI->getPointerRegClass();
1005
1006 return nullptr;
1007}
1008
// Intersect CurRC with every constraint Reg picks up from this instruction's
// operands — optionally across the whole bundle — by delegating to
// getRegClassConstraintEffectForVRegImpl per operand. Returns null once the
// constraints become unsatisfiable.
// NOTE(review): the first signature line (original line 1009, presumably
// `const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(`)
// was lost in extraction.
1010 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
1011 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
1012 // Check every operands inside the bundle if we have
1013 // been asked to.
1014 if (ExploreBundle)
1015 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
1016 ++OpndIt)
1017 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
1018 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
1019 else
1020 // Otherwise, just check the current operands.
1021 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
1022 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
1023 return CurRC;
1024}
1025
1026const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1027 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1028 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1029 assert(CurRC && "Invalid initial register class");
1030 // Check if Reg is constrained by some of its use/def from MI.
1031 const MachineOperand &MO = getOperand(OpIdx);
1032 if (!MO.isReg() || MO.getReg() != Reg)
1033 return CurRC;
1034 // If yes, accumulate the constraints through the operand.
1035 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1036}
1037
// Fold the class constraint of operand OpIdx into CurRC, handling subregister
// indices via matching-super / sub-class-with-subreg queries.
// NOTE(review): the first signature line (original line 1038) and the
// declaration of OpRC (line 1041, presumably
// `const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);`)
// were lost in extraction.
1039 unsigned OpIdx, const TargetRegisterClass *CurRC,
1040 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1042 const MachineOperand &MO = getOperand(OpIdx);
1043 assert(MO.isReg() &&
1044 "Cannot get register constraints for non-register operand");
1045 assert(CurRC && "Invalid initial register class");
1046 if (unsigned SubIdx = MO.getSubReg()) {
1047 if (OpRC)
1048 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1049 else
1050 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1051 } else if (OpRC)
1052 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1053 return CurRC;
1054}
1055
1056/// Return the number of instructions inside the MI bundle, not counting the
1057/// header instruction.
// NOTE(review): the signature and the declaration of the iterator I
// (original lines 1058-1059) were lost in extraction.
1060 unsigned Size = 0;
1061 while (I->isBundledWithSucc()) {
1062 ++Size;
1063 ++I;
1064 }
1065 return Size;
1066}
1067
1068/// Returns true if the MachineInstr has an implicit-use operand of exactly
1069/// the given register (not considering sub/super-registers).
// NOTE(review): the signature line (original line 1070) was lost in
// extraction.
1071 for (const MachineOperand &MO : implicit_operands()) {
1072 if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1073 return true;
1074 }
1075 return false;
1076}
1077
1078/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
1079/// the specific register or -1 if it is not found. It further tightens
1080/// the search criteria to a use that kills the register if isKill is true.
// NOTE(review): the first signature line (original line 1081, presumably
// `int MachineInstr::findRegisterUseOperandIdx(Register Reg,`) was lost in
// extraction.
1082 const TargetRegisterInfo *TRI,
1083 bool isKill) const {
1084 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1085 const MachineOperand &MO = getOperand(i);
1086 if (!MO.isReg() || !MO.isUse())
1087 continue;
1088 Register MOReg = MO.getReg();
1089 if (!MOReg)
1090 continue;
// With a TRI, overlapping (sub/super) physical registers also match.
1091 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1092 if (!isKill || MO.isKill())
1093 return i;
1094 }
1095 return -1;
1096}
1097
1098/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1099/// indicating if this instruction reads or writes Reg. This also considers
1100/// partial defines.
// NOTE(review): the remainder of the signature (original lines 1102-1103,
// presumably `MachineInstr::readsWritesVirtualRegister(Register Reg,
// SmallVectorImpl<unsigned> *Ops) const {`) was lost in extraction.
1101std::pair<bool,bool>
1104 bool PartDef = false; // Partial redefine.
1105 bool FullDef = false; // Full define.
1106 bool Use = false;
1107
1108 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1109 const MachineOperand &MO = getOperand(i);
1110 if (!MO.isReg() || MO.getReg() != Reg)
1111 continue;
// When provided, Ops collects the indices of all operands mentioning Reg.
1112 if (Ops)
1113 Ops->push_back(i);
1114 if (MO.isUse())
1115 Use |= !MO.isUndef();
1116 else if (MO.getSubReg() && !MO.isUndef())
1117 // A partial def undef doesn't count as reading the register.
1118 PartDef = true;
1119 else
1120 FullDef = true;
1121 }
1122 // A partial redefine uses Reg unless there is also a full define.
1123 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1124}
1125
1126/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1127/// the specified register or -1 if it is not found. If isDead is true, defs
1128/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1129/// also checks if there is a def of a super-register.
1131 const TargetRegisterInfo *TRI,
1132 bool isDead, bool Overlap) const {
1133 bool isPhys = Reg.isPhysical();
1134 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1135 const MachineOperand &MO = getOperand(i);
1136 // Accept regmask operands when Overlap is set.
1137 // Ignore them when looking for a specific def operand (Overlap == false).
1138 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1139 return i;
1140 if (!MO.isReg() || !MO.isDef())
1141 continue;
1142 Register MOReg = MO.getReg();
1143 bool Found = (MOReg == Reg);
1144 if (!Found && TRI && isPhys && MOReg.isPhysical()) {
1145 if (Overlap)
1146 Found = TRI->regsOverlap(MOReg, Reg);
1147 else
1148 Found = TRI->isSubRegister(MOReg, Reg);
1149 }
1150 if (Found && (!isDead || MO.isDead()))
1151 return i;
1152 }
1153 return -1;
1154}
1155
1156/// findFirstPredOperandIdx() - Find the index of the first operand in the
1157/// operand list that is used to represent the predicate. It returns -1 if
1158/// none is found.
1160 // Don't call MCID.findFirstPredOperandIdx() because this variant
1161 // is sometimes called on an instruction that's not yet complete, and
1162 // so the number of operands is less than the MCID indicates. In
1163 // particular, the PTX target does this.
1164 const MCInstrDesc &MCID = getDesc();
1165 if (MCID.isPredicable()) {
1166 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1167 if (MCID.operands()[i].isPredicate())
1168 return i;
1169 }
1170
1171 return -1;
1172}
1173
// MachineOperand::TiedTo is 4 bits wide.
// TiedMax therefore acts as the saturation value: a TiedTo equal to TiedMax
// means "tied to an operand at index >= TiedMax-1" and the real partner must
// be recovered by searching (see findTiedOperandIdx).
const unsigned TiedMax = 15;
1176
1177/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1178///
1179/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1180/// field. TiedTo can have these values:
1181///
1182/// 0: Operand is not tied to anything.
1183/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1184/// TiedMax: Tied to an operand >= TiedMax-1.
1185///
1186/// The tied def must be one of the first TiedMax operands on a normal
1187/// instruction. INLINEASM instructions allow more tied defs.
1188///
1189void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1190 MachineOperand &DefMO = getOperand(DefIdx);
1191 MachineOperand &UseMO = getOperand(UseIdx);
1192 assert(DefMO.isDef() && "DefIdx must be a def operand");
1193 assert(UseMO.isUse() && "UseIdx must be a use operand");
1194 assert(!DefMO.isTied() && "Def is already tied to another use");
1195 assert(!UseMO.isTied() && "Use is already tied to another def");
1196
1197 if (DefIdx < TiedMax) {
1198 UseMO.TiedTo = DefIdx + 1;
1199 } else {
1200 // Inline asm can use the group descriptors to find tied operands,
1201 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1202 // but on normal instruction, the tied def must be within the first TiedMax
1203 // operands.
1204 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1205 "DefIdx out of range");
1206 UseMO.TiedTo = TiedMax;
1207 }
1208
1209 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1210 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1211}
1212
1213/// Given the index of a tied register operand, find the operand it is tied to.
1214/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1215/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  // TiedTo stores the partner index biased by one; zero means "not tied".
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    // Only indices >= TiedMax-1 can have saturated, so start the scan there.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      // Advance past non-register meta operands to the next GC pointer that
      // is passed on a register.
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!F.isUseOperandTiedToDef(TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1290
1291/// clearKillInfo - Clears kill flags on all operands.
1292///
1294 for (MachineOperand &MO : operands()) {
1295 if (MO.isReg() && MO.isUse())
1296 MO.setIsKill(false);
1297 }
1298}
1299
1301 unsigned SubIdx,
1302 const TargetRegisterInfo &RegInfo) {
1303 if (ToReg.isPhysical()) {
1304 if (SubIdx)
1305 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1306 for (MachineOperand &MO : operands()) {
1307 if (!MO.isReg() || MO.getReg() != FromReg)
1308 continue;
1309 MO.substPhysReg(ToReg, RegInfo);
1310 }
1311 } else {
1312 for (MachineOperand &MO : operands()) {
1313 if (!MO.isReg() || MO.getReg() != FromReg)
1314 continue;
1315 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1316 }
1317 }
1318}
1319
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  // Don't touch instructions that have non-trivial invariants. For example,
  // terminators have to be at the end of a basic block.
  // NOTE(review): the trailing clause(s) of this condition were lost in
  // extraction of this copy — restore from upstream before building.
  if (isPosition() || isDebugInstr() || isTerminator() ||
    return false;

  // Don't touch instructions which can have non-load/store effects.
  //
  // Inline asm has a "sideeffect" marker to indicate whether the asm has
  // intentional side-effects. Even if an inline asm is not "sideeffect",
  // though, it still can't be speculatively executed: the operation might
  // not be valid on the current target, or for some combinations of operands.
  // (Some transforms that move an instruction don't speculatively execute it;
  // we currently don't try to handle that distinction here.)
  //
  // Other instructions handled here include those that can raise FP
  // exceptions, x86 "DIV" instructions which trap on divide by zero, and
  // stack adjustments.
  // NOTE(review): the opening line of this condition was lost in extraction
  // of this copy — restore from upstream before building.
      isInlineAsm())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  // NOTE(review): the `if (...)` guard for the early return below was lost in
  // extraction of this copy — restore from upstream before building.
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1369
1371 // Don't delete frame allocation labels.
1372 // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
1373 if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
1374 return false;
1375
1376 // Don't delete FAKE_USE.
1377 // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
1378 if (isFakeUse())
1379 return false;
1380
1381 // LIFETIME markers should be preserved.
1382 // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
1383 if (isLifetimeMarker())
1384 return false;
1385
1386 // If we can move an instruction, we can remove it. Otherwise, it has
1387 // a side-effect of some sort.
1388 bool SawStore = false;
1389 return isPHI() || isSafeToMove(SawStore);
1390}
1391
1393 LiveRegUnits *LivePhysRegs) const {
1394 // Instructions without side-effects are dead iff they only define dead regs.
1395 // This function is hot and this loop returns early in the common case,
1396 // so only perform additional checks before this if absolutely necessary.
1397 for (const MachineOperand &MO : all_defs()) {
1398 Register Reg = MO.getReg();
1399 if (Reg.isPhysical()) {
1400 // Don't delete live physreg defs, or any reserved register defs.
1401 if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
1402 return false;
1403 } else {
1404 if (MO.isDead())
1405 continue;
1406 for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
1407 if (&Use != this)
1408 // This def has a non-debug use. Don't delete the instruction!
1409 return false;
1410 }
1411 }
1412 }
1413
1414 // Technically speaking inline asm without side effects and no defs can still
1415 // be deleted. But there is so much bad inline asm code out there, we should
1416 // let them be.
1417 if (isInlineAsm())
1418 return false;
1419
1420 // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
1421 if (isLifetimeMarker())
1422 return true;
1423
1424 // If there are no defs with uses, then we call the instruction dead so long
1425 // as we do not suspect it may have sideeffects.
1426 return wouldBeTriviallyDead();
1427}
1428
1430 BatchAAResults *AA, bool UseTBAA,
1431 const MachineMemOperand *MMOa,
1432 const MachineMemOperand *MMOb) {
1433 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1434 // operates with MachineMemOperand offset with some important assumptions:
1435 // - LLVM fundamentally assumes flat address spaces.
1436 // - MachineOperand offset can *only* result from legalization and cannot
1437 // affect queries other than the trivial case of overlap checking.
1438 // - These offsets never wrap and never step outside of allocated objects.
1439 // - There should never be any negative offsets here.
1440 //
1441 // FIXME: Modify API to hide this math from "user"
1442 // Even before we go to AA we can reason locally about some memory objects. It
1443 // can save compile time, and possibly catch some corner cases not currently
1444 // covered.
1445
1446 int64_t OffsetA = MMOa->getOffset();
1447 int64_t OffsetB = MMOb->getOffset();
1448 int64_t MinOffset = std::min(OffsetA, OffsetB);
1449
1450 LocationSize WidthA = MMOa->getSize();
1451 LocationSize WidthB = MMOb->getSize();
1452 bool KnownWidthA = WidthA.hasValue();
1453 bool KnownWidthB = WidthB.hasValue();
1454 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1455
1456 const Value *ValA = MMOa->getValue();
1457 const Value *ValB = MMOb->getValue();
1458 bool SameVal = (ValA && ValB && (ValA == ValB));
1459 if (!SameVal) {
1460 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1461 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1462 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1463 return false;
1464 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1465 return false;
1466 if (PSVa && PSVb && (PSVa == PSVb))
1467 SameVal = true;
1468 }
1469
1470 if (SameVal && BothMMONonScalable) {
1471 if (!KnownWidthA || !KnownWidthB)
1472 return true;
1473 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1474 int64_t LowWidth = (MinOffset == OffsetA)
1475 ? WidthA.getValue().getKnownMinValue()
1476 : WidthB.getValue().getKnownMinValue();
1477 return (MinOffset + LowWidth > MaxOffset);
1478 }
1479
1480 if (!AA)
1481 return true;
1482
1483 if (!ValA || !ValB)
1484 return true;
1485
1486 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1487 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1488
1489 // If Scalable Location Size has non-zero offset, Width + Offset does not work
1490 // at the moment
1491 if ((WidthA.isScalable() && OffsetA > 0) ||
1492 (WidthB.isScalable() && OffsetB > 0))
1493 return true;
1494
1495 int64_t OverlapA =
1496 KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1498 int64_t OverlapB =
1499 KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1501
1502 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1503 ? WidthA
1504 : LocationSize::precise(OverlapA);
1505 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1506 ? WidthB
1507 : LocationSize::precise(OverlapB);
1508
1509 return !AA->isNoAlias(
1510 MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1511 MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1512}
1513
1515 bool UseTBAA) const {
1516 const MachineFunction *MF = getMF();
1518 const MachineFrameInfo &MFI = MF->getFrameInfo();
1519
1520 // Exclude call instruction which may alter the memory but can not be handled
1521 // by this function.
1522 if (isCall() || Other.isCall())
1523 return true;
1524
1525 // If neither instruction stores to memory, they can't alias in any
1526 // meaningful way, even if they read from the same address.
1527 if (!mayStore() && !Other.mayStore())
1528 return false;
1529
1530 // Both instructions must be memory operations to be able to alias.
1531 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1532 return false;
1533
1534 // Let the target decide if memory accesses cannot possibly overlap.
1535 if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
1536 return false;
1537
1538 // Memory operations without memory operands may access anything. Be
1539 // conservative and assume `MayAlias`.
1540 if (memoperands_empty() || Other.memoperands_empty())
1541 return true;
1542
1543 // Skip if there are too many memory operands.
1544 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1545 if (NumChecks > TII->getMemOperandAACheckLimit())
1546 return true;
1547
1548 // Check each pair of memory operands from both instructions, which can't
1549 // alias only if all pairs won't alias.
1550 for (auto *MMOa : memoperands())
1551 for (auto *MMOb : Other.memoperands())
1552 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1553 return true;
1554
1555 return false;
1556}
1557
1558bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1559 bool UseTBAA) const {
1560 if (AA) {
1561 BatchAAResults BAA(*AA);
1562 return mayAlias(&BAA, Other, UseTBAA);
1563 }
1564 return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1565}
1566
1567/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1568/// or volatile memory reference, or if the information describing the memory
1569/// reference is not available. Return false if it is known to have no ordered
1570/// memory references.
1572 // An instruction known never to access memory won't have a volatile access.
1573 if (!mayStore() &&
1574 !mayLoad() &&
1575 !isCall() &&
1577 return false;
1578
1579 // Otherwise, if the instruction has no memory reference information,
1580 // conservatively assume it wasn't preserved.
1581 if (memoperands_empty())
1582 return true;
1583
1584 // Check if any of our memory operands are ordered.
1585 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1586 return !MMO->isUnordered();
1587 });
1588}
1589
1590/// isDereferenceableInvariantLoad - Return true if this instruction will never
1591/// trap and is loading from a location whose value is invariant across a run of
1592/// this function.
1594 // If the instruction doesn't load at all, it isn't an invariant load.
1595 if (!mayLoad())
1596 return false;
1597
1598 // If the instruction has lost its memoperands, conservatively assume that
1599 // it may not be an invariant load.
1600 if (memoperands_empty())
1601 return false;
1602
1603 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1604
1605 for (MachineMemOperand *MMO : memoperands()) {
1606 if (!MMO->isUnordered())
1607 // If the memory operand has ordering side effects, we can't move the
1608 // instruction. Such an instruction is technically an invariant load,
1609 // but the caller code would need updated to expect that.
1610 return false;
1611 if (MMO->isStore()) return false;
1612 if (MMO->isInvariant() && MMO->isDereferenceable())
1613 continue;
1614
1615 // A load from a constant PseudoSourceValue is invariant.
1616 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1617 if (PSV->isConstant(&MFI))
1618 continue;
1619 }
1620
1621 // Otherwise assume conservatively.
1622 return false;
1623 }
1624
1625 // Everything checks out.
1626 return true;
1627}
1628
1630 if (!isPHI())
1631 return {};
1632 assert(getNumOperands() >= 3 &&
1633 "It's illegal to have a PHI without source operands");
1634
1635 Register Reg = getOperand(1).getReg();
1636 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1637 if (getOperand(i).getReg() != Reg)
1638 return {};
1639 return Reg;
1640}
1641
1644 return true;
1645 if (isInlineAsm()) {
1646 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1647 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1648 return true;
1649 }
1650
1651 return false;
1652}
1653
1655 return mayStore() || isCall() ||
1657}
1658
1659/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1660///
1662 for (const MachineOperand &MO : operands()) {
1663 if (!MO.isReg() || MO.isUse())
1664 continue;
1665 if (!MO.isDead())
1666 return false;
1667 }
1668 return true;
1669}
1670
1672 for (const MachineOperand &MO : implicit_operands()) {
1673 if (!MO.isReg() || MO.isUse())
1674 continue;
1675 if (!MO.isDead())
1676 return false;
1677 }
1678 return true;
1679}
1680
1681/// copyImplicitOps - Copy implicit register operands from specified
1682/// instruction to this instruction.
1684 const MachineInstr &MI) {
1685 for (const MachineOperand &MO :
1686 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1687 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1688 addOperand(MF, MO);
1689}
1690
1692 const MCInstrDesc &MCID = getDesc();
1693 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1694 return true;
1695 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1696 const auto &Operand = getOperand(I);
1697 if (!Operand.isReg() || Operand.isDef())
1698 // Ignore the defined registers as MCID marks only the uses as tied.
1699 continue;
1700 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1701 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1702 if (ExpectedTiedIdx != TiedIdx)
1703 return true;
1704 }
1705 return false;
1706}
1707
// NOTE(review): several lines of this function (MachineInstr::getTypeToPrint)
// were lost in this copy: the signature head, the declaration of `Op`
// (presumably `getOperand(OpIdx)` — confirm against upstream), and the guard
// preceding the first unconditional-looking `return MRI.getType(...)`.
// Restore them from upstream before building.
                                 const MachineRegisterInfo &MRI) const {
  if (!Op.isReg())
    return LLT{};

    return MRI.getType(Op.getReg());

  auto &OpInfo = getDesc().operands()[OpIdx];
  // Non-generic operands carry no type index; just report the vreg's type.
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  // Each generic type index is printed at most once per instruction.
  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  LLT TypeToPrint = MRI.getType(Op.getReg());
  // Don't mark the type index printed if it wasn't actually printed: maybe
  // another operand with the same type index has an actual type attached:
  if (TypeToPrint.isValid())
    PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return TypeToPrint;
}
1731
1732#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1734 dbgs() << " ";
1735 print(dbgs());
1736}
1737
1738LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1739 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1740 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1741 if (Depth >= MaxDepth)
1742 return;
1743 if (!AlreadySeenInstrs.insert(this).second)
1744 return;
1745 // PadToColumn always inserts at least one space.
1746 // Don't mess up the alignment if we don't want any space.
1747 if (Depth)
1748 fdbgs().PadToColumn(Depth * 2);
1749 print(fdbgs());
1750 for (const MachineOperand &MO : operands()) {
1751 if (!MO.isReg() || MO.isDef())
1752 continue;
1753 Register Reg = MO.getReg();
1754 if (Reg.isPhysical())
1755 continue;
1756 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1757 if (NewMI == nullptr)
1758 continue;
1759 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1760 }
1761}
1762
1764 unsigned MaxDepth) const {
1765 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1766 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1767}
1768#endif
1769
1770void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1771 bool SkipDebugLoc, bool AddNewLine,
1772 const TargetInstrInfo *TII) const {
1773 const Module *M = nullptr;
1774 const Function *F = nullptr;
1775 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1776 F = &MF->getFunction();
1777 M = F->getParent();
1778 if (!TII)
1779 TII = MF->getSubtarget().getInstrInfo();
1780 }
1781
1782 ModuleSlotTracker MST(M);
1783 if (F)
1784 MST.incorporateFunction(*F);
1785 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1786}
1787
// NOTE(review): this appears to be the ModuleSlotTracker overload of
// MachineInstr::print (the overload above forwards to a 7-argument print).
// Its signature head and a number of single lines — chiefly the
// `if (getFlag(...))` guards for the flag keywords and a few inline-asm
// related guards — were lost in this copy; restore them from upstream before
// building. All remaining code lines are unchanged.
                         bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
                         bool AddNewLine, const TargetInstrInfo *TII) const {
  // We can be a bit tidier if we know the MachineFunction.
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  tryToGetTargetInfo(*this, TRI, MRI, TII);

  if (isCFIInstruction())
    assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");

  SmallBitVector PrintedTypes(8);
  bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
  // Resolve the tied-partner index for an operand, but only when ties are
  // actually being printed; otherwise report 0.
  auto getTiedOperandIdx = [&](unsigned OpIdx) {
    if (!ShouldPrintRegisterTies)
      return 0U;
    const MachineOperand &MO = getOperand(OpIdx);
    if (MO.isReg() && MO.isTied() && !MO.isDef())
      return findTiedOperandIdx(OpIdx);
    return 0U;
  };
  unsigned StartOp = 0;
  unsigned e = getNumOperands();

  // Print explicitly defined operands on the left of an assignment syntax.
  while (StartOp < e) {
    const MachineOperand &MO = getOperand(StartOp);
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;

    if (StartOp != 0)
      OS << ", ";

    LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
    unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
    MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
             ShouldPrintRegisterTies, TiedOperandIdx, TRI);
    ++StartOp;
  }

  if (StartOp != 0)
    OS << " = ";

  // NOTE(review): each flag keyword below was originally emitted under a
  // matching `if (getFlag(MachineInstr::<Flag>))` guard that is missing here.
  OS << "frame-setup ";
  OS << "frame-destroy ";
  OS << "nnan ";
  OS << "ninf ";
  OS << "nsz ";
  OS << "arcp ";
  OS << "contract ";
  OS << "afn ";
  OS << "reassoc ";
  OS << "nuw ";
  OS << "nsw ";
  OS << "exact ";
  OS << "nofpexcept ";
  OS << "nomerge ";
  OS << "nneg ";
  OS << "disjoint ";
  OS << "nusw ";
  OS << "samesign ";
  OS << "inbounds ";

  // Print the opcode name.
  if (TII)
    OS << TII->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  // NOTE(review): the inline-asm guard (`if (isInlineAsm() && ...)` or
  // similar) opening this region is missing here; the closing `}` below is
  // its match.
    // Print asm string.
    OS << " ";
    const unsigned OpIdx = InlineAsm::MIOp_AsmString;
    LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
    unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
    getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
                            IsStandalone, ShouldPrintRegisterTies,
                            TiedOperandIdx, TRI);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
      OS << " [isconvergent]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    // NOTE(review): the asm-dialect guards for the next two outputs are
    // missing here.
      OS << " [attdialect]";
      OS << " [inteldialect]";

    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";

    if (isDebugValueLike() && MO.isMetadata()) {
      // Pretty print DBG_VALUE* instructions.
      auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
      if (DIV && !DIV->getName().empty())
        OS << "!\"" << DIV->getName() << '\"';
      else {
        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
        unsigned TiedOperandIdx = getTiedOperandIdx(i);
        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
      }
    } else if (isDebugLabel() && MO.isMetadata()) {
      // Pretty print DBG_LABEL instructions.
      auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
      if (DIL && !DIL->getName().empty())
        OS << "\"" << DIL->getName() << '\"';
      else {
        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
        unsigned TiedOperandIdx = getTiedOperandIdx(i);
        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
      }
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      const InlineAsm::Flag F(Flag);
      OS << ":[";
      OS << F.getKindName();

      unsigned RCID;
      if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
        if (TRI) {
          OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
        } else
          OS << ":RC" << RCID;
      }

      if (F.isMemKind()) {
        const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
        OS << ":" << InlineAsm::getMemConstraintName(MCID);
      }

      unsigned TiedTo;
      if (F.isUseOperandTiedToDef(TiedTo))
        OS << " tiedto:$" << TiedTo;

      if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
           F.isRegUseKind()) &&
          F.getRegMayBeFolded()) {
        OS << " foldable";
      }

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + F.getNumOperandRegisters();
    } else {
      LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
      unsigned TiedOperandIdx = getTiedOperandIdx(i);
      // NOTE(review): the then-branch (subreg-index pretty-print call) of this
      // `if` is missing here.
      if (MO.isImm() && isOperandSubregIdx(i))
      else
        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
    }
  }

  // Print any optional symbols attached to this instruction as-if they were
  // operands.
  if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " pre-instr-symbol ";
    MachineOperand::printSymbol(OS, *PreInstrSymbol);
  }
  if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " post-instr-symbol ";
    MachineOperand::printSymbol(OS, *PostInstrSymbol);
  }
  if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " heap-alloc-marker ";
    HeapAllocMarker->printAsOperand(OS, MST);
  }
  if (MDNode *PCSections = getPCSections()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " pcsections ";
    PCSections->printAsOperand(OS, MST);
  }
  if (MDNode *MMRA = getMMRAMetadata()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " mmra ";
    MMRA->printAsOperand(OS, MST);
  }
  if (uint32_t CFIType = getCFIType()) {
    if (!FirstOp)
      OS << ',';
    OS << " cfi-type " << CFIType;
  }

  if (DebugInstrNum) {
    if (!FirstOp)
      OS << ",";
    OS << " debug-instr-number " << DebugInstrNum;
  }

  if (!SkipDebugLoc) {
    if (const DebugLoc &DL = getDebugLoc()) {
      if (!FirstOp)
        OS << ',';
      OS << " debug-location ";
      DL->printAsOperand(OS, MST);
    }
  }

  if (!memoperands_empty()) {
    // NOTE(review): the declaration of `SSNs` (passed to Op->print below) is
    // missing here.
    const LLVMContext *Context = nullptr;
    std::unique_ptr<LLVMContext> CtxPtr;
    const MachineFrameInfo *MFI = nullptr;
    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
      MFI = &MF->getFrameInfo();
      Context = &MF->getFunction().getContext();
    } else {
      // No enclosing function: synthesize a throwaway context for printing.
      CtxPtr = std::make_unique<LLVMContext>();
      Context = CtxPtr.get();
    }

    OS << " :: ";
    bool NeedComma = false;
    for (const MachineMemOperand *Op : memoperands()) {
      if (NeedComma)
        OS << ", ";
      Op->print(OS, MST, SSNs, *Context, MFI, TII);
      NeedComma = true;
    }
  }

  if (SkipDebugLoc)
    return;

  bool HaveSemi = false;

  // Print debug location information.
  if (const DebugLoc &DL = getDebugLoc()) {
    if (!HaveSemi) {
      OS << ';';
      HaveSemi = true;
    }
    OS << ' ';
    DL.print(OS);
  }

  // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
  if ((isNonListDebugValue() && getNumOperands() >= 4) ||
      (isDebugValueList() && getNumOperands() >= 2) ||
      (isDebugRef() && getNumOperands() >= 3)) {
    if (getDebugVariableOp().isMetadata()) {
      if (!HaveSemi) {
        OS << ";";
        HaveSemi = true;
      }
      auto *DV = getDebugVariable();
      OS << " line no:" << DV->getLine();
      // NOTE(review): the indirectness guard for the next output is missing
      // here.
        OS << " indirect";
    }
  }
  // TODO: DBG_LABEL

  if (PrintMIAddrs)
    OS << " ; " << this;

  if (AddNewLine)
    OS << '\n';
}
2113
2115 const TargetRegisterInfo *RegInfo,
2116 bool AddIfNotFound) {
2117 bool isPhysReg = IncomingReg.isPhysical();
2118 bool hasAliases = isPhysReg &&
2119 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2120 bool Found = false;
2122 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2123 MachineOperand &MO = getOperand(i);
2124 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2125 continue;
2126
2127 // DEBUG_VALUE nodes do not contribute to code generation and should
2128 // always be ignored. Failure to do so may result in trying to modify
2129 // KILL flags on DEBUG_VALUE nodes.
2130 if (MO.isDebug())
2131 continue;
2132
2133 Register Reg = MO.getReg();
2134 if (!Reg)
2135 continue;
2136
2137 if (Reg == IncomingReg) {
2138 if (!Found) {
2139 if (MO.isKill())
2140 // The register is already marked kill.
2141 return true;
2142 if (isPhysReg && isRegTiedToDefOperand(i))
2143 // Two-address uses of physregs must not be marked kill.
2144 return true;
2145 MO.setIsKill();
2146 Found = true;
2147 }
2148 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2149 // A super-register kill already exists.
2150 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2151 return true;
2152 if (RegInfo->isSubRegister(IncomingReg, Reg))
2153 DeadOps.push_back(i);
2154 }
2155 }
2156
2157 // Trim unneeded kill operands.
2158 while (!DeadOps.empty()) {
2159 unsigned OpIdx = DeadOps.back();
2160 if (getOperand(OpIdx).isImplicit() &&
2163 else
2164 getOperand(OpIdx).setIsKill(false);
2165 DeadOps.pop_back();
2166 }
2167
2168 // If not found, this means an alias of one of the operands is killed. Add a
2169 // new implicit operand if required.
2170 if (!Found && AddIfNotFound) {
2172 false /*IsDef*/,
2173 true /*IsImp*/,
2174 true /*IsKill*/));
2175 return true;
2176 }
2177 return Found;
2178}
2179
// Body of MachineInstr::clearRegisterKills (signature line 2180 not visible
// in this listing).  Clears the kill flag on every use operand of Reg; for
// physical registers (RegInfo kept non-null) any overlapping register also
// has its kill flag cleared.
2181 const TargetRegisterInfo *RegInfo) {
2182 if (!Reg.isPhysical())
2183 RegInfo = nullptr;
2184 for (MachineOperand &MO : operands()) {
2185 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2186 continue;
2187 Register OpReg = MO.getReg();
2188 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2189 MO.setIsKill(false);
2190 }
2191}
2192
// Body of MachineInstr::addRegisterDead (signature line 2193 not visible in
// this listing).  Marks def operands of Reg as dead, mirroring the structure
// of addRegisterKilled above; returns true if a dead flag was found or added.
// NOTE(review): embedded-number gaps (2200, 2225-2226, 2237) indicate dropped
// lines (DeadOps declaration, removeOperand branch, addOperand call head).
2194 const TargetRegisterInfo *RegInfo,
2195 bool AddIfNotFound) {
2196 bool isPhysReg = Reg.isPhysical();
2197 bool hasAliases = isPhysReg &&
2198 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2199 bool Found = false;
2201 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2202 MachineOperand &MO = getOperand(i);
2203 if (!MO.isReg() || !MO.isDef())
2204 continue;
2205 Register MOReg = MO.getReg();
2206 if (!MOReg)
2207 continue;
2208
2209 if (MOReg == Reg) {
2210 MO.setIsDead();
2211 Found = true;
2212 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2213 // There exists a super-register that's marked dead.
2214 if (RegInfo->isSuperRegister(Reg, MOReg))
2215 return true;
2216 if (RegInfo->isSubRegister(Reg, MOReg))
2217 DeadOps.push_back(i);
2218 }
2219 }
2220
2221 // Trim unneeded dead operands.
2222 while (!DeadOps.empty()) {
2223 unsigned OpIdx = DeadOps.back();
2224 if (getOperand(OpIdx).isImplicit() &&
2227 else
2228 getOperand(OpIdx).setIsDead(false);
2229 DeadOps.pop_back();
2230 }
2231
2232 // If not found, this means an alias of one of the operands is dead. Add a
2233 // new implicit operand if required.
2234 if (Found || !AddIfNotFound)
2235 return Found;
2236
2238 true /*IsDef*/,
2239 true /*IsImp*/,
2240 false /*IsKill*/,
2241 true /*IsDead*/));
2242 return true;
2243}
2244
// Body of MachineInstr::clearRegisterDeads (signature line 2245 not visible):
// clears the dead flag on every def of exactly Reg.
2246 for (MachineOperand &MO : all_defs())
2247 if (MO.getReg() == Reg)
2248 MO.setIsDead(false);
2249}
2250
// Body of MachineInstr::setRegisterDefReadUndef (signature line 2251 not
// visible): sets/clears the undef flag on every subregister def of Reg
// (defs with a non-zero subreg index).
2252 for (MachineOperand &MO : all_defs())
2253 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2254 MO.setIsUndef(IsUndef);
2255}
2256
// Body of MachineInstr::addRegisterDefined (signature line 2257 not visible).
// Ensures the instruction has a def of Reg: returns early if one already
// exists (exact-match search for virtual registers, overlap-aware search for
// physical registers), otherwise adds an implicit def operand.
// NOTE(review): original line 2269 (the addOperand(MachineOperand::CreateReg(
// Reg, ...) call head) is missing from this listing.
2258 const TargetRegisterInfo *RegInfo) {
2259 if (Reg.isPhysical()) {
2260 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2261 if (MO)
2262 return;
2263 } else {
2264 for (const MachineOperand &MO : all_defs()) {
2265 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2266 return;
2267 }
2268 }
2270 true /*IsDef*/,
2271 true /*IsImp*/));
2272}
2273
// Body of MachineInstr::setPhysRegsDeadExcept (signature line 2274 not
// visible).  Marks every physical-register def dead unless it overlaps one
// of UsedRegs; if the instruction carries a register-mask operand (a call),
// additionally adds explicit defs for the used registers.
2275 const TargetRegisterInfo &TRI) {
2276 bool HasRegMask = false;
2277 for (MachineOperand &MO : operands()) {
2278 if (MO.isRegMask()) {
2279 HasRegMask = true;
2280 continue;
2281 }
2282 if (!MO.isReg() || !MO.isDef()) continue;
2283 Register Reg = MO.getReg();
2284 if (!Reg.isPhysical())
2285 continue;
2286 // If there are no uses, including partial uses, the def is dead.
2287 if (llvm::none_of(UsedRegs,
2288 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2289 MO.setIsDead();
2290 }
2291
2292 // This is a call with a register mask operand.
2293 // Mask clobbers are always dead, so add defs for the non-dead defines.
2294 if (HasRegMask)
2295 for (const Register &UsedReg : UsedRegs)
2296 addRegisterDefined(UsedReg, &TRI);
2297}
2298
// Hash for machine-instruction CSE: combines the opcode with the hash of
// every operand except virtual-register defs (so two instructions computing
// the same value into different vregs hash equally).  NOTE(review): the
// qualified function name (original line 2300, presumably
// MachineInstrExpressionTrait::getHashValue) is missing from this listing.
2299unsigned
2301 // Build up a buffer of hash code components.
2302 SmallVector<size_t, 16> HashComponents;
2303 HashComponents.reserve(MI->getNumOperands() + 1);
2304 HashComponents.push_back(MI->getOpcode());
2305 for (const MachineOperand &MO : MI->operands()) {
2306 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2307 continue; // Skip virtual register defs.
2308
2309 HashComponents.push_back(hash_value(MO));
2310 }
2311 return hash_combine_range(HashComponents);
2312}
2313
// Body of MachineInstr::getLocCookieMD (signature line 2314 not visible).
// Scans operands from the end for a non-empty metadata node — the inline-asm
// source-location cookie — and returns it, or nullptr if none is found.
// NOTE(review): original line 2321 is missing from this listing.
2315 // Find the source location cookie.
2316 const MDNode *LocMD = nullptr;
2317 for (unsigned i = getNumOperands(); i != 0; --i) {
2318 if (getOperand(i-1).isMetadata() &&
2319 (LocMD = getOperand(i-1).getMetadata()) &&
2320 LocMD->getNumOperands() != 0) {
2322 return LocMD;
2323 }
2324 }
2325
2326 return nullptr;
2327}
2328
// Body of an inline-asm error reporter (signature lines 2329-2330 not
// visible).  Extracts the location cookie (0 if absent) and reports the
// message through the context's diagnostic handler.  NOTE(review): line
// 2336, which presumably obtains the LLVMContext `Ctx`, is missing.
2331 const MDNode *LocMD = getLocCookieMD();
2332 uint64_t LocCookie =
2333 LocMD
2334 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2335 : 0;
2337 Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2338}
2339
// Body of a generic error reporter (signature line 2340 not visible):
// diagnoses through the owning function's context.  NOTE(review): line 2343,
// the DiagnosticInfo argument of diagnose(), is missing from this listing.
2341 const Function &Fn = getMF()->getFunction();
2342 Fn.getContext().diagnose(
2344}
2345
// Four llvm::BuildMI overloads for constructing DBG_VALUE-style debug
// instructions.  NOTE(review): the first line of each overload's signature
// (original lines 2346, 2362, 2396-2397, 2407-2408) is missing from this
// extracted listing.
//
// Overload 1 (free-standing, single register location): builds
// <MCID> Reg, [0-imm if indirect | 0-reg], Variable, Expr.
2347 const MCInstrDesc &MCID, bool IsIndirect,
2348 Register Reg, const MDNode *Variable,
2349 const MDNode *Expr) {
2350 assert(isa<DILocalVariable>(Variable) && "not a variable");
2351 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2352 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2353 "Expected inlined-at fields to agree");
2354 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2355 if (IsIndirect)
2356 MIB.addImm(0U);
2357 else
2358 MIB.addReg(0U);
2359 return MIB.addMetadata(Variable).addMetadata(Expr);
2360}
2361
// Overload 2 (free-standing, operand list): for plain DBG_VALUE delegates to
// overload 1 or mirrors its layout; for list forms emits Variable/Expr first
// and then all debug operands.
2363 const MCInstrDesc &MCID, bool IsIndirect,
2364 ArrayRef<MachineOperand> DebugOps,
2365 const MDNode *Variable, const MDNode *Expr) {
2366 assert(isa<DILocalVariable>(Variable) && "not a variable");
2367 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2368 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2369 "Expected inlined-at fields to agree");
2370 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2371 assert(DebugOps.size() == 1 &&
2372 "DBG_VALUE must contain exactly one debug operand");
2373 MachineOperand DebugOp = DebugOps[0];
2374 if (DebugOp.isReg())
2375 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2376 Expr);
2377
2378 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2379 if (IsIndirect)
2380 MIB.addImm(0U);
2381 else
2382 MIB.addReg(0U);
2383 return MIB.addMetadata(Variable).addMetadata(Expr);
2384 }
2385
2386 auto MIB = BuildMI(MF, DL, MCID);
2387 MIB.addMetadata(Variable).addMetadata(Expr);
2388 for (const MachineOperand &DebugOp : DebugOps)
2389 if (DebugOp.isReg())
2390 MIB.addReg(DebugOp.getReg());
2391 else
2392 MIB.add(DebugOp);
2393 return MIB;
2394}
2395
// Overload 3: builds via overload 1 and inserts before iterator I in BB.
2398 const DebugLoc &DL, const MCInstrDesc &MCID,
2399 bool IsIndirect, Register Reg,
2400 const MDNode *Variable, const MDNode *Expr) {
2401 MachineFunction &MF = *BB.getParent();
2402 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2403 BB.insert(I, MI);
2404 return MachineInstrBuilder(MF, MI);
2405}
2406
// Overload 4: builds via overload 2 and inserts before iterator I in BB.
2409 const DebugLoc &DL, const MCInstrDesc &MCID,
2410 bool IsIndirect,
2411 ArrayRef<MachineOperand> DebugOps,
2412 const MDNode *Variable, const MDNode *Expr) {
2413 MachineFunction &MF = *BB.getParent();
2414 MachineInstr *MI =
2415 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2416 BB.insert(I, MI);
2417 return MachineInstrBuilder(MF, *MI);
2418}
2419
2420/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2421/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
// NOTE(review): the static function's signature line (original 2422) is
// missing from this listing; line 2432, the indirect-case DIExpression
// rewrite, is missing as well.
2423 const MachineInstr &MI,
2424 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2425 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2426 "Expected inlined-at fields to agree");
2427
2428 const DIExpression *Expr = MI.getDebugExpression();
2429 if (MI.isIndirectDebugValue()) {
2430 assert(MI.getDebugOffset().getImm() == 0 &&
2431 "DBG_VALUE with nonzero offset");
2433 } else if (MI.isDebugValueList()) {
2434 // We will replace the spilled register with a frame index, so
2435 // immediately deref all references to the spilled register.
2436 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2437 for (const MachineOperand *Op : SpilledOperands) {
2438 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2439 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2440 }
2441 }
2442 return Expr;
2443}
// Convenience overload (signature line 2444 not visible): gathers all debug
// operands referring to SpillReg and forwards to the overload above.
// NOTE(review): line 2447, the SpillOperands vector declaration, is missing.
2445 Register SpillReg) {
2446 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2448 llvm::make_pointer_range(MI.getDebugOperandsForReg(SpillReg)));
2449 return computeExprForSpill(MI, SpillOperands);
2450}
2451
// llvm::buildDbgValueForSpill, first overload (signature lines 2452-2453 not
// visible in this listing).  Clones a DBG_VALUE whose register was spilled,
// substituting the frame index for every debug operand that used SpillReg
// and installing the spill-adjusted expression.
2454 const MachineInstr &Orig,
2455 int FrameIndex, Register SpillReg) {
2456 assert(!Orig.isDebugRef() &&
2457 "DBG_INSTR_REF should not reference a virtual register.");
2458 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2459 MachineInstrBuilder NewMI =
2460 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2461 // Non-Variadic Operands: Location, Offset, Variable, Expression
2462 // Variadic Operands: Variable, Expression, Locations...
2463 if (Orig.isNonListDebugValue())
2464 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2465 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2466 if (Orig.isDebugValueList()) {
2467 for (const MachineOperand &Op : Orig.debug_operands())
2468 if (Op.isReg() && Op.getReg() == SpillReg)
2469 NewMI.addFrameIndex(FrameIndex);
2470 else
2471 NewMI.add(MachineOperand(Op));
2472 }
2473 return NewMI;
2474}
// Second overload (signature lines 2475-2476 not visible): same cloning, but
// the operands to replace are given explicitly via SpilledOperands.
2477 const MachineInstr &Orig, int FrameIndex,
2478 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2479 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2480 MachineInstrBuilder NewMI =
2481 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2482 // Non-Variadic Operands: Location, Offset, Variable, Expression
2483 // Variadic Operands: Variable, Expression, Locations...
2484 if (Orig.isNonListDebugValue())
2485 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2486 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2487 if (Orig.isDebugValueList()) {
2488 for (const MachineOperand &Op : Orig.debug_operands())
2489 if (is_contained(SpilledOperands, &Op))
2490 NewMI.addFrameIndex(FrameIndex);
2491 else
2492 NewMI.add(MachineOperand(Op));
2493 }
2494 return NewMI;
2495}
2496
// llvm::updateDbgValueForSpill (signature line 2497 not visible): rewrites
// Orig in place, turning Reg debug operands into frame-index operands and
// updating the expression.  NOTE(review): line 2501, the non-list-case
// rewrite under the if on line 2500, is missing from this listing.
2498 Register Reg) {
2499 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2500 if (Orig.isNonListDebugValue())
2502 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2503 Op.ChangeToFrameIndex(FrameIndex);
2504 Orig.getDebugExpressionOp().setMetadata(Expr);
2506
// Body of MachineInstr::collectDebugValues (signature lines 2507-2508 not
// visible).  Collects the DBG_VALUE instructions immediately following this
// instruction that refer to its first (register) operand; stops at the first
// non-debug instruction.  NOTE(review): line 2513, which initializes the
// iterator DI, is missing from this listing.
2509 MachineInstr &MI = *this;
2510 if (!MI.getOperand(0).isReg())
2511 return;
2512
2514 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2515 DI != DE; ++DI) {
2516 if (!DI->isDebugValue())
2517 return;
2518 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2519 DbgValues.push_back(&*DI);
2520 }
2521}
2522
// Body of MachineInstr::changeDebugValuesDefReg (signature line 2523 not
// visible).  Finds all DBG_VALUE users of this instruction's defined
// register via MRI use lists and redirects their debug operands to Reg.
// NOTE(review): line 2525, the DbgValues vector declaration, is missing.
2524 // Collect matching debug values.
2526
2527 if (!getOperand(0).isReg())
2528 return;
2529
2530 Register DefReg = getOperand(0).getReg();
2531 auto *MRI = getRegInfo();
2532 for (auto &MO : MRI->use_operands(DefReg)) {
2533 auto *DI = MO.getParent();
2534 if (!DI->isDebugValue())
2535 continue;
2536 if (DI->hasDebugOperandForReg(DefReg)) {
2537 DbgValues.push_back(DI);
2538 }
2539 }
2540
2541 // Propagate Reg to debug value instructions.
2542 for (auto *DBI : DbgValues)
2543 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2544 Op.setReg(Reg);
2545}
2546
2548
// Body of static getSpillSlotSize (signature line 2549 not visible).  Sums
// the sizes of all memory accesses that target spill-slot frame indices;
// returns a precise LocationSize (0 if none matched).  NOTE(review): line
// 2558, the early return taken when an access size is unknown, is missing
// from this listing.
2550 const MachineFrameInfo &MFI) {
2551 std::optional<TypeSize> Size;
2552 for (const auto *A : Accesses) {
2553 if (MFI.isSpillSlotObjectIndex(
2554 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2555 ->getFrameIndex())) {
2556 LocationSize S = A->getSize();
2557 if (!S.hasValue())
2559 if (!Size)
2560 Size = S.getValue();
2561 else
2562 Size = *Size + S.getValue();
2563 }
2564 }
2565 if (!Size)
2566 return LocationSize::precise(0);
2567 return LocationSize::precise(*Size);
2568}
2569
// Four spill/restore size queries.  NOTE(review): each function's
// MachineInstr::-qualified signature line (originals 2571, 2582-2583,
// 2590, 2601-2602, including the Accesses vector declarations) is missing
// from this extracted listing.
//
// getSpillSize: size of the store if this instruction stores to a spill
// slot (post frame-elimination check via TII).
2570std::optional<LocationSize>
2572 int FI;
2573 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2574 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2575 if (MFI.isSpillSlotObjectIndex(FI))
2576 return (*memoperands_begin())->getSize();
2577 }
2578 return std::nullopt;
2579}
2580
// getFoldedSpillSize: total size of spill-slot stores folded into this
// instruction, computed from its memory accesses.
2581std::optional<LocationSize>
2584 if (TII->hasStoreToStackSlot(*this, Accesses))
2585 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2586 return std::nullopt;
2587}
2588
// getRestoreSize: size of the load if this instruction reloads from a
// spill slot.
2589std::optional<LocationSize>
2591 int FI;
2592 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2593 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2594 if (MFI.isSpillSlotObjectIndex(FI))
2595 return (*memoperands_begin())->getSize();
2596 }
2597 return std::nullopt;
2598}
2599
// getFoldedRestoreSize: total size of spill-slot loads folded into this
// instruction.
2600std::optional<LocationSize>
2603 if (TII->hasLoadFromStackSlot(*this, Accesses))
2604 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2605 return std::nullopt;
2606}
2607
// MachineInstr::getDebugInstrNum, two overloads (signature lines 2608 and
// 2614 not visible).  Lazily assigns and returns the instruction's
// debug-instr-number, allocating a fresh one from the owning (or supplied)
// MachineFunction on first use.
2609 if (DebugInstrNum == 0)
2610 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2611 return DebugInstrNum;
2612}
2613
2615 if (DebugInstrNum == 0)
2616 DebugInstrNum = MF.getNewDebugInstrNum();
2617 return DebugInstrNum;
2618}
2619
// GlobalISel convenience accessors: return the LLTs of the first 2-5
// register operands, queried through the function's MachineRegisterInfo.
2620std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2621 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2622 getRegInfo()->getType(getOperand(1).getReg()));
2623}
2624
2625std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2626 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2627 getRegInfo()->getType(getOperand(1).getReg()),
2628 getRegInfo()->getType(getOperand(2).getReg()));
2629}
2630
2631std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2632 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2633 getRegInfo()->getType(getOperand(1).getReg()),
2634 getRegInfo()->getType(getOperand(2).getReg()),
2635 getRegInfo()->getType(getOperand(3).getReg()));
2636}
2637
2638std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2639 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2640 getRegInfo()->getType(getOperand(1).getReg()),
2641 getRegInfo()->getType(getOperand(2).getReg()),
2642 getRegInfo()->getType(getOperand(3).getReg()),
2643 getRegInfo()->getType(getOperand(4).getReg()));
2644}
2645
// GlobalISel convenience accessors: return (Register, LLT) pairs for the
// first 2-5 register operands.  NOTE(review): each overload's second
// signature line (originals 2647, 2655, 2665, 2675/2677) is missing from
// this extracted listing.
2646std::tuple<Register, LLT, Register, LLT>
2648 Register Reg0 = getOperand(0).getReg();
2649 Register Reg1 = getOperand(1).getReg();
2650 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2651 getRegInfo()->getType(Reg1));
2652}
2653
2654std::tuple<Register, LLT, Register, LLT, Register, LLT>
2656 Register Reg0 = getOperand(0).getReg();
2657 Register Reg1 = getOperand(1).getReg();
2658 Register Reg2 = getOperand(2).getReg();
2659 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2660 getRegInfo()->getType(Reg1), Reg2,
2661 getRegInfo()->getType(Reg2));
2662}
2663
2664std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2666 Register Reg0 = getOperand(0).getReg();
2667 Register Reg1 = getOperand(1).getReg();
2668 Register Reg2 = getOperand(2).getReg();
2669 Register Reg3 = getOperand(3).getReg();
2670 return std::tuple(
2671 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2672 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2673}
2674
2676 LLT>
2678 Register Reg0 = getOperand(0).getReg();
2679 Register Reg1 = getOperand(1).getReg();
2680 Register Reg2 = getOperand(2).getReg();
2681 Register Reg3 = getOperand(3).getReg();
2682 Register Reg4 = getOperand(4).getReg();
2683 return std::tuple(
2684 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2685 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2686 Reg4, getRegInfo()->getType(Reg4));
2687}
2688
// Body of MachineInstr::insert (signature lines 2689-2690 not visible).
// Inserts Ops before the operand InsertBefore: unties all tied operand
// pairs, moves the trailing operands out, appends the new and moved
// operands, then re-ties with indices shifted past the insertion point.
// NOTE(review): lines 2698 (TiedOpIndices map declaration), 2712 (MovingOps
// vector declaration) and 2717 (removeOperand in the move loop) are missing
// from this extracted listing.
2691 assert(InsertBefore != nullptr && "invalid iterator");
2692 assert(InsertBefore->getParent() == this &&
2693 "iterator points to operand of other inst");
2694 if (Ops.empty())
2695 return;
2696
2697 // Do one pass to untie operands.
2699 for (const MachineOperand &MO : operands()) {
2700 if (MO.isReg() && MO.isTied()) {
2701 unsigned OpNo = getOperandNo(&MO);
2702 unsigned TiedTo = findTiedOperandIdx(OpNo);
2703 TiedOpIndices[OpNo] = TiedTo;
2704 untieRegOperand(OpNo);
2705 }
2706 }
2707
2708 unsigned OpIdx = getOperandNo(InsertBefore);
2709 unsigned NumOperands = getNumOperands();
2710 unsigned OpsToMove = NumOperands - OpIdx;
2711
2713 MovingOps.reserve(OpsToMove);
2714
2715 for (unsigned I = 0; I < OpsToMove; ++I) {
2716 MovingOps.emplace_back(getOperand(OpIdx));
2718 }
2719 for (const MachineOperand &MO : Ops)
2720 addOperand(MO);
2721 for (const MachineOperand &OpMoved : MovingOps)
2722 addOperand(OpMoved);
2723
2724 // Re-tie operands.
2725 for (auto [Tie1, Tie2] : TiedOpIndices) {
2726 if (Tie1 >= OpIdx)
2727 Tie1 += Ops.size();
2728 if (Tie2 >= OpIdx)
2729 Tie2 += Ops.size();
2730 tieOperands(Tie1, Tie2);
2731 }
2732}
2733
// Returns true if the inline-asm register operand at index OpId may be
// folded (e.g. into a memory operand).  The operand's kind and fold
// permission are read from the InlineAsm flag word stored in the
// immediately preceding operand; non-register operands, non-immediate
// flag operands, and non-register kinds return false.
2734bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2735 assert(OpId && "expected non-zero operand id");
2736 assert(isInlineAsm() && "should only be used on inline asm");
2737
2738 if (!getOperand(OpId).isReg())
2739 return false;
2740
2741 const MachineOperand &MD = getOperand(OpId - 1);
2742 if (!MD.isImm())
2743 return false;
2744
2745 InlineAsm::Flag F(MD.getImm());
2746 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2747 return F.getRegMayBeFolded();
2748 return false;
2749}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:638
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
A set of register units.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
mir Rename Register Operands
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static cl::opt< bool > PrintMIAddrs("print-mi-addrs", cl::Hidden, cl::desc("Print addresses of MachineInstrs when dumping"))
static LocationSize getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, const SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, BatchAAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForRegHelper(Instruction *MI, Register Reg)
SmallVector< const MachineMemOperand *, 2 > MMOList
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetInstrInfo *&TII)
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
Register Reg
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
MachineInstr unsigned OpIdx
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file contains some templates that are useful if you are working with the STL at all.
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
Capacity getNext() const
Get the next larger capacity.
size_t getSize() const
Get the number of elements in an array with this capacity.
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:191
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
A debug info location.
Definition DebugLoc.h:124
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition DebugLoc.h:244
Diagnostic information for inline asm reporting.
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static StringRef getMemConstraintName(ConstraintCode C)
Definition InlineAsm.h:470
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCRegister Reg) const
Returns true if register Reg and no aliasing register is in the set.
A set of register units used to track register liveness.
bool hasValue() const
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isScalable() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
ArrayRef< MCOperandInfo > operands() const
unsigned short Opcode
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1445
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1451
bool isValid() const
isValid - Returns true until all the operands have been visited.
LLVM_ABI MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isDebugValueList() const
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
mop_range debug_operands()
Returns all operands that are used to determine the variable location for this DBG_VALUE instruction.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
LLVM_ABI MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
const MachineBasicBlock * getParent() const
MDNode * getMMRAMetadata() const
Helper to extract mmra.op metadata.
LLVM_ABI void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
bool isDebugLabel() const
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
LLVM_ABI bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
QueryType
API for querying MachineInstr properties.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
LLVM_ABI const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
LLVM_ABI std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
LLVM_ABI Register isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
LLVM_ABI bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
LLVM_ABI void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI bool mayAlias(BatchAAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
LLVM_ABI void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instr...
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
bool isDebugRef() const
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI std::optional< LocationSize > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
LLVM_ABI bool wouldBeTriviallyDead() const
Return true if this instruction would be trivially dead if all of its defined registers were dead.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
LLVM_ABI std::tuple< LLT, LLT > getFirst2LLTs() const
LLVM_ABI std::optional< LocationSize > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
LLVM_ABI void unbundleFromPred()
Break bundle above this instruction.
LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool isStackAligningInlineAsm() const
LLVM_ABI void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
bool isCFIInstruction() const
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
LLVM_ABI void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
mop_range operands()
LLVM_ABI bool isCandidateForAdditionalCallInfo(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an additional information associated with it.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
LLVM_ABI std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
LLVM_ABI std::optional< LocationSize > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
LLVM_ABI InlineAsm::AsmDialect getInlineAsmDialect() const
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
LLVM_ABI bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
LLVM_ABI const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops before the operand iterator InsertBefore. Can untie/retie tied operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
LLVM_ABI void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
LLVM_ABI const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLVM_ABI LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging support: determine the generic type to be printed (if needed) on uses and defs.
bool isLifetimeMarker() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
LLVM_ABI unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI void emitGenericError(const Twine &ErrMsg) const
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ABI const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
LLVM_ABI void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
MachineOperand * mop_iterator
iterator/begin/end - Iterate over all operands of a machine instruction.
LLVM_ABI bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI bool isDead(const MachineRegisterInfo &MRI, LiveRegUnits *LivePhysRegs=nullptr) const
Check whether an MI is dead.
LLVM_ABI std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
LLVM_ABI const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
friend class MachineFunction
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
LLVM_ABI void dump() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
MachineOperand & getDebugOperand(unsigned Index)
LLVM_ABI std::optional< LocationSize > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
LLVM_ABI void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
LLVM_ABI void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void emitInlineAsmError(const Twine &ErrMsg) const
Emit an error referring to the source location of this instruction.
uint32_t getFlags() const
Return the MI flags bitvector.
bool isPseudoProbe() const
LLVM_ABI bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
LLVM_ABI void unbundleFromSucc()
Break bundle below this instruction.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
LLVM_ABI bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
LLVM_ABI void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
LLVM_ABI const MDNode * getLocCookieMD() const
For inline asm, get the !srcloc metadata node if we have it, and decode the loc cookie from it.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isFakeUse() const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
LLVM_ABI int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
LLVM_ABI bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
LLVM_ABI void setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs)
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
LLVM_ABI bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
LLVM_ABI bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
LLVM_ABI void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static LLVM_ABI void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
LLVM_ABI void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static LLVM_ABI void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Representation for a specific memory location.
LLVM_ABI void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
An or instruction, which can be marked as "disjoint", indicating that the inputs don't have a 1 in th...
Definition InstrTypes.h:404
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
Definition Operator.h:154
Instruction that can have a nneg flag (zext/uitofp).
Definition InstrTypes.h:641
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static LLVM_ABI unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
MI-level Statepoint operands.
Definition StackMaps.h:159
LLVM_ABI int getFirstGCPtrIdx()
Get index of first GC pointer operand, or -1 if there are none.
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
std::enable_if_t< detail::IsValidPointer< X, Y >::value, bool > hasa(Y &&MD)
Check whether Metadata has a Value.
Definition Metadata.h:649
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:666
constexpr double e
Definition MathExtras.h:47
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:310
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1714
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition iterator.h:336
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1721
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
Definition STLExtras.h:544
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
@ Other
Any other memory.
Definition ModRef.h:68
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1849
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1879
filter_iterator_impl< WrappedIteratorT, PredicateT, detail::fwd_or_bidi_tag< WrappedIteratorT > > filter_iterator
Defines filter_iterator to a suitable specialization of filter_iterator_impl, based on the underlying...
Definition STLExtras.h:531
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:465
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:851
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:760
static LLVM_ABI unsigned getHashValue(const MachineInstr *const &MI)