MachineInstr.cpp
1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
38#include "llvm/IR/Constants.h"
40#include "llvm/IR/DebugLoc.h"
41#include "llvm/IR/Function.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/LLVMContext.h"
45#include "llvm/IR/Metadata.h"
46#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/MC/MCInstrDesc.h"
53#include "llvm/Support/Debug.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <cstring>
62#include <utility>
63
64using namespace llvm;
65
66static cl::opt<bool>
67 PrintMIAddrs("print-mi-addrs", cl::Hidden,
68 cl::desc("Print addresses of MachineInstrs when dumping"));
69
71 if (const MachineBasicBlock *MBB = MI.getParent())
72 if (const MachineFunction *MF = MBB->getParent())
73 return MF;
74 return nullptr;
75}
76
77// Try to crawl up to the machine function and get TRI/MRI/TII from it.
79 const TargetRegisterInfo *&TRI,
81 const TargetInstrInfo *&TII) {
82
83 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
84 TRI = MF->getSubtarget().getRegisterInfo();
85 MRI = &MF->getRegInfo();
86 TII = MF->getSubtarget().getInstrInfo();
87 }
88}
89
91 for (MCPhysReg ImpDef : MCID->implicit_defs())
92 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
93 for (MCPhysReg ImpUse : MCID->implicit_uses())
94 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
95}
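// Illustrative sketch (not part of the upstream file): for a hypothetical
// descriptor whose implicit_defs() is {X86::EFLAGS} and implicit_uses() is
// {X86::ESP}, the two loops above append the equivalent of
//
//   addOperand(MF, MachineOperand::CreateReg(X86::EFLAGS, /*isDef=*/true,
//                                            /*isImp=*/true));
//   addOperand(MF, MachineOperand::CreateReg(X86::ESP, /*isDef=*/false,
//                                            /*isImp=*/true));
//
// i.e. an implicit def followed by an implicit use.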
96
97/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
98/// implicit operands. It reserves space for the number of operands specified by
99/// the MCInstrDesc.
100MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
101 DebugLoc DL, bool NoImp)
102 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
103 DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
104 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
105
106 // Reserve space for the expected number of operands.
107 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
108 MCID->implicit_uses().size()) {
109 CapOperands = OperandCapacity::get(NumOps);
110 Operands = MF.allocateOperandArray(CapOperands);
111 }
112
113 if (!NoImp)
115}
116
117/// MachineInstr ctor - Copies MachineInstr arg exactly.
118/// Does not copy the number from debug instruction numbering, to preserve
119/// uniqueness.
120MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
121 : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
122 Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
123 Opcode(MI.getOpcode()) {
124 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
125
126 CapOperands = OperandCapacity::get(MI.getNumOperands());
127 Operands = MF.allocateOperandArray(CapOperands);
128
129 // Copy operands.
130 for (const MachineOperand &MO : MI.operands())
131 addOperand(MF, MO);
132
133 // Replicate ties between the operands, which addOperand was not
134 // able to do reliably.
135 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
136 MachineOperand &NewMO = getOperand(i);
137 const MachineOperand &OrigMO = MI.getOperand(i);
138 NewMO.TiedTo = OrigMO.TiedTo;
139 }
140
141 // Copy all the sensible flags.
142 setFlags(MI.Flags);
143}
144
146 if (getParent())
147 getMF()->handleChangeDesc(*this, TID);
148 MCID = &TID;
149 Opcode = TID.Opcode;
150}
151
153 MovePos->getParent()->splice(MovePos, getParent(), getIterator());
154}
155
156/// getRegInfo - If this instruction is embedded into a MachineFunction,
157/// return the MachineRegisterInfo object for the current function, otherwise
158/// return null.
159MachineRegisterInfo *MachineInstr::getRegInfo() {
161 return &MBB->getParent()->getRegInfo();
162 return nullptr;
163}
164
165const MachineRegisterInfo *MachineInstr::getRegInfo() const {
166 if (const MachineBasicBlock *MBB = getParent())
167 return &MBB->getParent()->getRegInfo();
168 return nullptr;
169}
170
171void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
172 for (MachineOperand &MO : operands())
173 if (MO.isReg())
174 MRI.removeRegOperandFromUseList(&MO);
175}
176
177void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
178 for (MachineOperand &MO : operands())
179 if (MO.isReg())
180 MRI.addRegOperandToUseList(&MO);
181}
182
185 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
187 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
188 addOperand(*MF, Op);
189}
190
191/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
192/// ranges. If MRI is non-null also update use-def chains.
194 unsigned NumOps, MachineRegisterInfo *MRI) {
195 if (MRI)
196 return MRI->moveOperands(Dst, Src, NumOps);
197 // MachineOperand is a trivially copyable type so we can just use memmove.
198 assert(Dst && Src && "Unknown operands");
199 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
200}
201
202/// addOperand - Add the specified operand to the instruction. If it is an
203/// implicit operand, it is added to the end of the operand list. If it is
204/// an explicit operand it is added at the end of the explicit operand list
205/// (before the first implicit operand).
207 assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
208 "Cannot add more operands.");
209 assert(MCID && "Cannot add operands before providing an instr descriptor");
210
211 // Check if we're adding one of our existing operands.
212 if (&Op >= Operands && &Op < Operands + NumOperands) {
213 // This is unusual: MI->addOperand(MI->getOperand(i)).
214 // If adding Op requires reallocating or moving existing operands around,
215 // the Op reference could go stale. Support it by copying Op.
216 MachineOperand CopyOp(Op);
217 return addOperand(MF, CopyOp);
218 }
219
220 // Find the insert location for the new operand. Implicit registers go at
221 // the end, everything else goes before the implicit regs.
222 //
223 // FIXME: Allow mixed explicit and implicit operands on inline asm.
224 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
225 // implicit-defs, but they must not be moved around. See the FIXME in
226 // InstrEmitter.cpp.
227 unsigned OpNo = getNumOperands();
228 bool isImpReg = Op.isReg() && Op.isImplicit();
229 if (!isImpReg && !isInlineAsm()) {
230 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
231 --OpNo;
232 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
233 }
234 }
235
236 // OpNo now points at the desired insertion point. Unless this is a variadic
237 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
238 // RegMask operands go between the explicit and implicit operands.
239 MachineRegisterInfo *MRI = getRegInfo();
240
241 // Determine if the Operands array needs to be reallocated.
242 // Save the old capacity and operand array.
243 OperandCapacity OldCap = CapOperands;
244 MachineOperand *OldOperands = Operands;
245 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
246 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
247 Operands = MF.allocateOperandArray(CapOperands);
248 // Move the operands before the insertion point.
249 if (OpNo)
250 moveOperands(Operands, OldOperands, OpNo, MRI);
251 }
252
253 // Move the operands following the insertion point.
254 if (OpNo != NumOperands)
255 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
256 MRI);
257 ++NumOperands;
258
259 // Deallocate the old operand array.
260 if (OldOperands != Operands && OldOperands)
261 MF.deallocateOperandArray(OldCap, OldOperands);
262
263 // Copy Op into place. It still needs to be inserted into the MRI use lists.
264 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
265 NewMO->ParentMI = this;
266
267 // When adding a register operand, tell MRI about it.
268 if (NewMO->isReg()) {
269 // Ensure isOnRegUseList() returns false, regardless of Op's status.
270 NewMO->Contents.Reg.Prev = nullptr;
271 // Ignore existing ties. This is not a property that can be copied.
272 NewMO->TiedTo = 0;
273 // Add the new operand to MRI, but only for instructions in an MBB.
274 if (MRI)
275 MRI->addRegOperandToUseList(NewMO);
276 // The MCID operand information isn't accurate until we start adding
277 // explicit operands. The implicit operands are added first, then the
278 // explicits are inserted before them.
279 if (!isImpReg) {
280 // Tie uses to defs as indicated in MCInstrDesc.
281 if (NewMO->isUse()) {
282 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
283 if (DefIdx != -1)
284 tieOperands(DefIdx, OpNo);
285 }
286 // If the register operand is flagged as early, mark the operand as such.
287 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
288 NewMO->setIsEarlyClobber(true);
289 }
290 // Ensure debug instructions set debug flag on register uses.
291 if (NewMO->isUse() && isDebugInstr())
292 NewMO->setIsDebug();
293 }
294}
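// Illustrative sketch (not part of the upstream file): the placement rule
// above keeps explicit operands in front of the implicit ones regardless of
// insertion order. Assuming MI already ends with an implicit-def of $eflags,
//
//   MI.addOperand(MF, MachineOperand::CreateImm(42));
//
// inserts the immediate *before* that implicit def rather than appending it,
// so the list stays (explicit operands..., 42, implicit-def $eflags).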
295
296void MachineInstr::removeOperand(unsigned OpNo) {
297 assert(OpNo < getNumOperands() && "Invalid operand number");
298 untieRegOperand(OpNo);
299
300#ifndef NDEBUG
301 // Moving tied operands would break the ties.
302 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
303 if (Operands[i].isReg())
304 assert(!Operands[i].isTied() && "Cannot move tied operands");
305#endif
306
307 MachineRegisterInfo *MRI = getRegInfo();
308 if (MRI && Operands[OpNo].isReg())
309 MRI->removeRegOperandFromUseList(Operands + OpNo);
310
311 // Don't call the MachineOperand destructor. A lot of this code depends on
312 // MachineOperand having a trivial destructor anyway, and adding a call here
313 // wouldn't make it 'destructor-correct'.
314
315 if (unsigned N = NumOperands - 1 - OpNo)
316 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
317 --NumOperands;
318}
319
320void MachineInstr::setExtraInfo(MachineFunction &MF,
322 MCSymbol *PreInstrSymbol,
323 MCSymbol *PostInstrSymbol,
324 MDNode *HeapAllocMarker, MDNode *PCSections,
325 uint32_t CFIType, MDNode *MMRAs) {
326 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
327 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
328 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
329 bool HasPCSections = PCSections != nullptr;
330 bool HasCFIType = CFIType != 0;
331 bool HasMMRAs = MMRAs != nullptr;
332 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
333 HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;
334
335 // Drop all extra info if there is none.
336 if (NumPointers <= 0) {
337 Info.clear();
338 return;
339 }
340
341 // If more than one pointer, then store out of line. Store heap alloc markers
342 // out of line because PointerSumType cannot hold more than 4 tag types with
343 // 32-bit pointers.
344 // FIXME: Maybe we should make the symbols in the extra info mutable?
345 else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
346 HasCFIType) {
347 Info.set<EIIK_OutOfLine>(
348 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
349 HeapAllocMarker, PCSections, CFIType, MMRAs));
350 return;
351 }
352
353 // Otherwise store the single pointer inline.
354 if (HasPreInstrSymbol)
355 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
356 else if (HasPostInstrSymbol)
357 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
358 else
359 Info.set<EIIK_MMO>(MMOs[0]);
360}
361
363 if (memoperands_empty())
364 return;
365
366 setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
369}
370
373 if (MMOs.empty()) {
374 dropMemRefs(MF);
375 return;
376 }
377
378 setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
381}
382
384 MachineMemOperand *MO) {
387 MMOs.push_back(MO);
388 setMemRefs(MF, MMOs);
389}
390
392 if (this == &MI)
393 // Nothing to do for a self-clone!
394 return;
395
396 assert(&MF == MI.getMF() &&
397 "Invalid machine functions when cloning memory refrences!");
398 // See if we can just steal the extra info already allocated for the
399 // instruction. We can do this whenever the pre- and post-instruction symbols
400 // are the same (including null).
401 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
402 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
403 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
404 getPCSections() == MI.getPCSections() && getMMRAMetadata() &&
405 MI.getMMRAMetadata()) {
406 Info = MI.Info;
407 return;
408 }
409
410 // Otherwise, fall back on a copy-based clone.
411 setMemRefs(MF, MI.memoperands());
412}
413
414/// Check to see if the MMOs pointed to by the two MemRefs arrays are
415/// identical.
418 if (LHS.size() != RHS.size())
419 return false;
420
421 auto LHSPointees = make_pointee_range(LHS);
422 auto RHSPointees = make_pointee_range(RHS);
423 return std::equal(LHSPointees.begin(), LHSPointees.end(),
424 RHSPointees.begin());
425}
426
429 // Try handling easy numbers of MIs with simpler mechanisms.
430 if (MIs.empty()) {
431 dropMemRefs(MF);
432 return;
433 }
434 if (MIs.size() == 1) {
435 cloneMemRefs(MF, *MIs[0]);
436 return;
437 }
438 // Because an empty memoperands list provides *no* information and must be
439 // handled conservatively (assuming the instruction can do anything), the only
440 // way to merge with it is to drop all other memoperands.
441 if (MIs[0]->memoperands_empty()) {
442 dropMemRefs(MF);
443 return;
444 }
445
446 // Handle the general case.
448 // Start with the first instruction.
449 assert(&MF == MIs[0]->getMF() &&
450 "Invalid machine functions when cloning memory references!");
451 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
452 // Now walk all the other instructions and accumulate any different MMOs.
453 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
454 assert(&MF == MI.getMF() &&
455 "Invalid machine functions when cloning memory references!");
456
457 // Skip MIs with identical operands to the first. This is a somewhat
458 // arbitrary hack but will catch common cases without being quadratic.
459 // TODO: We could fully implement merge semantics here if needed.
460 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
461 continue;
462
463 // Because an empty memoperands list provides *no* information and must be
464 // handled conservatively (assuming the instruction can do anything), the
465 // only way to merge with it is to drop all other memoperands.
466 if (MI.memoperands_empty()) {
467 dropMemRefs(MF);
468 return;
469 }
470
471 // Otherwise accumulate these into our temporary buffer of the merged state.
472 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
473 }
474
475 setMemRefs(MF, MergedMMOs);
476}
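// Illustrative sketch (not part of the upstream file): a typical caller is a
// pass that fuses two memory instructions into one and wants the union of
// their memory references. With FirstLd and SecondLd as the originals and
// Paired as the newly built instruction (all names assumed):
//
//   Paired->cloneMergedMemRefs(MF, {&FirstLd, &SecondLd});
//
// If either input has an empty memoperand list, the code above deliberately
// leaves Paired with no memoperands, i.e. the conservative "may access
// anything" state.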
477
479 // Do nothing if old and new symbols are the same.
480 if (Symbol == getPreInstrSymbol())
481 return;
482
483 // If there was only one symbol and we're removing it, just clear info.
484 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
485 Info.clear();
486 return;
487 }
488
489 setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
492}
493
495 // Do nothing if old and new symbols are the same.
496 if (Symbol == getPostInstrSymbol())
497 return;
498
499 // If there was only one symbol and we're removing it, just clear info.
500 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
501 Info.clear();
502 return;
503 }
504
505 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
508}
509
511 // Do nothing if old and new symbols are the same.
512 if (Marker == getHeapAllocMarker())
513 return;
514
515 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
517}
518
520 // Do nothing if old and new symbols are the same.
521 if (PCSections == getPCSections())
522 return;
523
524 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
525 getHeapAllocMarker(), PCSections, getCFIType(),
527}
528
530 // Do nothing if old and new types are the same.
531 if (Type == getCFIType())
532 return;
533
534 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
536}
537
539 // Do nothing if old and new symbols are the same.
540 if (MMRAs == getMMRAMetadata())
541 return;
542
543 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
545}
546
548 const MachineInstr &MI) {
549 if (this == &MI)
550 // Nothing to do for a self-clone!
551 return;
552
553 assert(&MF == MI.getMF() &&
554 "Invalid machine functions when cloning instruction symbols!");
555
556 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
557 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
558 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
559 setPCSections(MF, MI.getPCSections());
560 setMMRAMetadata(MF, MI.getMMRAMetadata());
561}
562
564 // For now, just return the union of the flags. If the flags get more
565 // complicated over time, we might need more logic here.
566 return getFlags() | Other.getFlags();
567}
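// Illustrative sketch (not part of the upstream file): a caller folding MIa
// and MIb into NewMI (names assumed) can propagate flags with
//
//   NewMI->setFlags(MIa.mergeFlagsWith(MIb));
//
// which, per the implementation above, is currently just the bitwise OR of
// both flag sets.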
568
570 uint32_t MIFlags = 0;
571 // Copy the wrapping flags.
572 if (const OverflowingBinaryOperator *OB =
573 dyn_cast<OverflowingBinaryOperator>(&I)) {
574 if (OB->hasNoSignedWrap())
576 if (OB->hasNoUnsignedWrap())
578 } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
579 if (TI->hasNoSignedWrap())
581 if (TI->hasNoUnsignedWrap())
583 } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
584 if (GEP->hasNoUnsignedSignedWrap())
586 if (GEP->hasNoUnsignedWrap())
588 if (GEP->isInBounds())
590 }
591
592 // Copy the nonneg flag.
593 if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
594 if (PNI->hasNonNeg())
596 // Copy the disjoint flag.
597 } else if (const PossiblyDisjointInst *PD =
598 dyn_cast<PossiblyDisjointInst>(&I)) {
599 if (PD->isDisjoint())
601 }
602
603 // Copy the samesign flag.
604 if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
605 if (ICmp->hasSameSign())
607
608 // Copy the exact flag.
609 if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
610 if (PE->isExact())
612
613 // Copy the fast-math flags.
614 if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
615 const FastMathFlags Flags = FP->getFastMathFlags();
616 if (Flags.noNaNs())
618 if (Flags.noInfs())
620 if (Flags.noSignedZeros())
622 if (Flags.allowReciprocal())
624 if (Flags.allowContract())
626 if (Flags.approxFunc())
628 if (Flags.allowReassoc())
630 }
631
632 if (I.getMetadata(LLVMContext::MD_unpredictable))
634
635 return MIFlags;
636}
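// Illustrative sketch (not part of the upstream file): for IR such as
//
//   %a = add nuw nsw i32 %x, %y
//
// the mapping above yields MachineInstr::NoUWrap | MachineInstr::NoSWrap,
// which instruction selection then attaches to the resulting MI via
// setFlags() / copyIRFlags() below.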
637
640}
641
642bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
643 assert(!isBundledWithPred() && "Must be called on bundle header");
645 if (MII->getDesc().getFlags() & Mask) {
646 if (Type == AnyInBundle)
647 return true;
648 } else {
649 if (Type == AllInBundle && !MII->isBundle())
650 return false;
651 }
652 // This was the last instruction in the bundle.
653 if (!MII->isBundledWithSucc())
654 return Type == AllInBundle;
655 }
656}
657
659 MICheckType Check) const {
660 // If opcodes or number of operands are not the same then the two
661 // instructions are obviously not identical.
662 if (Other.getOpcode() != getOpcode() ||
663 Other.getNumOperands() != getNumOperands())
664 return false;
665
666 if (isBundle()) {
667 // We have passed the test above that both instructions have the same
668 // opcode, so we know that both instructions are bundles here. Let's compare
669 // MIs inside the bundle.
670 assert(Other.isBundle() && "Expected that both instructions are bundles.");
673 // Loop until we have analyzed the last instruction inside at least one of the
674 // bundles.
675 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
676 ++I1;
677 ++I2;
678 if (!I1->isIdenticalTo(*I2, Check))
679 return false;
680 }
681 // If we've reached the end of just one of the two bundles, but not both,
682 // the instructions are not identical.
683 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
684 return false;
685 }
686
687 // Check operands to make sure they match.
688 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
689 const MachineOperand &MO = getOperand(i);
690 const MachineOperand &OMO = Other.getOperand(i);
691 if (!MO.isReg()) {
692 if (!MO.isIdenticalTo(OMO))
693 return false;
694 continue;
695 }
696
697 // Clients may or may not want to ignore defs when testing for equality.
698 // For example, machine CSE pass only cares about finding common
699 // subexpressions, so it's safe to ignore virtual register defs.
700 if (MO.isDef()) {
701 if (Check == IgnoreDefs)
702 continue;
703 else if (Check == IgnoreVRegDefs) {
704 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
705 if (!MO.isIdenticalTo(OMO))
706 return false;
707 } else {
708 if (!MO.isIdenticalTo(OMO))
709 return false;
710 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
711 return false;
712 }
713 } else {
714 if (!MO.isIdenticalTo(OMO))
715 return false;
716 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
717 return false;
718 }
719 }
720 // If DebugLoc does not match then two debug instructions are not identical.
721 if (isDebugInstr())
722 if (getDebugLoc() && Other.getDebugLoc() &&
723 getDebugLoc() != Other.getDebugLoc())
724 return false;
725 // If pre- or post-instruction symbols do not match then the two instructions
726 // are not identical.
727 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
728 getPostInstrSymbol() != Other.getPostInstrSymbol())
729 return false;
730 // Call instructions with different CFI types are not identical.
731 if (isCall() && getCFIType() != Other.getCFIType())
732 return false;
733
734 return true;
735}
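// Illustrative sketch (not part of the upstream file): MachineCSE-style
// callers ignore virtual register defs so that two computations of the same
// value compare equal even though they define different vregs:
//
//   if (MIa.isIdenticalTo(MIb, MachineInstr::IgnoreVRegDefs)) {
//     // Safe to reuse MIa's result instead of MIb (caller's rewrite logic).
//   }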
736
738 if (!isDebugValueLike() || !Other.isDebugValueLike())
739 return false;
740 if (getDebugLoc() != Other.getDebugLoc())
741 return false;
742 if (getDebugVariable() != Other.getDebugVariable())
743 return false;
744 if (getNumDebugOperands() != Other.getNumDebugOperands())
745 return false;
746 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
747 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
748 return false;
751 Other.getDebugExpression(), Other.isIndirectDebugValue()))
752 return false;
753 return true;
754}
755
757 return getParent()->getParent();
758}
759
761 assert(getParent() && "Not embedded in a basic block!");
762 return getParent()->remove(this);
763}
764
766 assert(getParent() && "Not embedded in a basic block!");
767 return getParent()->remove_instr(this);
768}
769
771 assert(getParent() && "Not embedded in a basic block!");
772 getParent()->erase(this);
773}
774
776 assert(getParent() && "Not embedded in a basic block!");
777 getParent()->erase_instr(this);
778}
779
781 if (!isCall(Type))
782 return false;
783 switch (getOpcode()) {
784 case TargetOpcode::PATCHPOINT:
785 case TargetOpcode::STACKMAP:
786 case TargetOpcode::STATEPOINT:
787 case TargetOpcode::FENTRY_CALL:
788 return false;
789 }
790 return true;
791}
792
794 if (isBundle())
797}
798
799template <typename Operand, typename Instruction>
800static iterator_range<
801 filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
803 std::function<bool(Operand & Op)> OpUsesReg(
804 [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
805 return make_filter_range(MI->debug_operands(), OpUsesReg);
806}
807
809 std::function<bool(const MachineOperand &Op)>>>
811 return getDebugOperandsForRegHelper<const MachineOperand, const MachineInstr>(
812 this, Reg);
813}
814
818 return getDebugOperandsForRegHelper<MachineOperand, MachineInstr>(this, Reg);
819}
820
822 unsigned NumOperands = MCID->getNumOperands();
823 if (!MCID->isVariadic())
824 return NumOperands;
825
826 for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
827 // The operands must always be in the following order:
828 // - explicit reg defs,
829 // - other explicit operands (reg uses, immediates, etc.),
830 // - implicit reg defs
831 // - implicit reg uses
832 if (MO.isReg() && MO.isImplicit())
833 break;
834 ++NumOperands;
835 }
836 return NumOperands;
837}
838
840 unsigned NumDefs = MCID->getNumDefs();
841 if (!MCID->isVariadic())
842 return NumDefs;
843
844 for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
845 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
846 break;
847 ++NumDefs;
848 }
849 return NumDefs;
850}
851
853 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
856 --Pred;
857 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
858 Pred->setFlag(BundledSucc);
859}
860
862 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
865 ++Succ;
866 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
867 Succ->setFlag(BundledPred);
868}
869
871 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
874 --Pred;
875 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
876 Pred->clearFlag(BundledSucc);
877}
878
880 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
883 ++Succ;
884 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
885 Succ->clearFlag(BundledPred);
886}
887
889 if (isInlineAsm()) {
890 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
891 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
892 return true;
893 }
894 return false;
895}
896
898 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
899 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
900 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
901}
902
904 unsigned *GroupNo) const {
905 assert(isInlineAsm() && "Expected an inline asm instruction");
906 assert(OpIdx < getNumOperands() && "OpIdx out of range");
907
908 // Ignore queries about the initial operands.
910 return -1;
911
912 unsigned Group = 0;
913 unsigned NumOps;
914 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
915 i += NumOps) {
916 const MachineOperand &FlagMO = getOperand(i);
917 // If we reach the implicit register operands, stop looking.
918 if (!FlagMO.isImm())
919 return -1;
920 const InlineAsm::Flag F(FlagMO.getImm());
921 NumOps = 1 + F.getNumOperandRegisters();
922 if (i + NumOps > OpIdx) {
923 if (GroupNo)
924 *GroupNo = Group;
925 return i;
926 }
927 ++Group;
928 }
929 return -1;
930}
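// Illustrative sketch (not part of the upstream file) of the operand layout
// walked above. An INLINEASM instruction looks roughly like
//
//   INLINEASM &"..."                     ; MIOp_AsmString
//             <extra-info imm>           ; MIOp_ExtraInfo
//             <flag imm: regdef, 1 reg>  %0
//             <flag imm: reguse, 1 reg>  %1
//             implicit-def dead $eflags
//
// so querying the operand index of %1 returns the index of the second flag
// word, and *GroupNo (if requested) is set to 1.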
931
933 assert(isDebugLabel() && "not a DBG_LABEL");
934 return cast<DILabel>(getOperand(0).getMetadata());
935}
936
938 assert((isDebugValueLike()) && "not a DBG_VALUE*");
939 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
940 return getOperand(VariableOp);
941}
942
944 assert((isDebugValueLike()) && "not a DBG_VALUE*");
945 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
946 return getOperand(VariableOp);
947}
948
950 return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
951}
952
954 assert((isDebugValueLike()) && "not a DBG_VALUE*");
955 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
956 return getOperand(ExpressionOp);
957}
958
960 assert((isDebugValueLike()) && "not a DBG_VALUE*");
961 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
962 return getOperand(ExpressionOp);
963}
964
966 return cast<DIExpression>(getDebugExpressionOp().getMetadata());
967}
968
971}
972
975 const TargetInstrInfo *TII,
976 const TargetRegisterInfo *TRI) const {
977 assert(getParent() && "Can't have an MBB reference here!");
978 assert(getMF() && "Can't have an MF reference here!");
979 const MachineFunction &MF = *getMF();
980
981 // Most opcodes have fixed constraints in their MCInstrDesc.
982 if (!isInlineAsm())
983 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
984
985 if (!getOperand(OpIdx).isReg())
986 return nullptr;
987
988 // For tied uses on inline asm, get the constraint from the def.
989 unsigned DefIdx;
990 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
991 OpIdx = DefIdx;
992
993 // Inline asm stores register class constraints in the flag word.
994 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
995 if (FlagIdx < 0)
996 return nullptr;
997
998 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
999 unsigned RCID;
1000 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
1001 F.hasRegClassConstraint(RCID))
1002 return TRI->getRegClass(RCID);
1003
1004 // Assume that all registers in a memory operand are pointers.
1005 if (F.isMemKind())
1006 return TRI->getPointerRegClass(MF);
1007
1008 return nullptr;
1009}
1010
1012 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
1013 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
1014 // Check every operand inside the bundle if we have
1015 // been asked to.
1016 if (ExploreBundle)
1017 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
1018 ++OpndIt)
1019 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
1020 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
1021 else
1022 // Otherwise, just check the current operands.
1023 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
1024 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
1025 return CurRC;
1026}
1027
1028const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1029 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1030 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1031 assert(CurRC && "Invalid initial register class");
1032 // Check if Reg is constrained by some of its use/def from MI.
1033 const MachineOperand &MO = getOperand(OpIdx);
1034 if (!MO.isReg() || MO.getReg() != Reg)
1035 return CurRC;
1036 // If yes, accumulate the constraints through the operand.
1037 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1038}
1039
1041 unsigned OpIdx, const TargetRegisterClass *CurRC,
1042 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1044 const MachineOperand &MO = getOperand(OpIdx);
1045 assert(MO.isReg() &&
1046 "Cannot get register constraints for non-register operand");
1047 assert(CurRC && "Invalid initial register class");
1048 if (unsigned SubIdx = MO.getSubReg()) {
1049 if (OpRC)
1050 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1051 else
1052 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1053 } else if (OpRC)
1054 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1055 return CurRC;
1056}
1057
1058/// Return the number of instructions inside the MI bundle, not counting the
1059/// header instruction.
1062 unsigned Size = 0;
1063 while (I->isBundledWithSucc()) {
1064 ++Size;
1065 ++I;
1066 }
1067 return Size;
1068}
1069
1070/// Returns true if the MachineInstr has an implicit-use operand of exactly
1071/// the given register (not considering sub/super-registers).
1073 for (const MachineOperand &MO : implicit_operands()) {
1074 if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1075 return true;
1076 }
1077 return false;
1078}
1079
1080/// findRegisterUseOperandIdx() - Returns the operand index that is a use of
1081/// the specified register or -1 if it is not found. It further tightens
1082/// the search criteria to a use that kills the register if isKill is true.
1084 const TargetRegisterInfo *TRI,
1085 bool isKill) const {
1086 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1087 const MachineOperand &MO = getOperand(i);
1088 if (!MO.isReg() || !MO.isUse())
1089 continue;
1090 Register MOReg = MO.getReg();
1091 if (!MOReg)
1092 continue;
1093 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1094 if (!isKill || MO.isKill())
1095 return i;
1096 }
1097 return -1;
1098}
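// Illustrative sketch (not part of the upstream file): locating and clearing
// a kill of $eax, including overlapping sub/super-registers because TRI is
// supplied:
//
//   int Idx = MI.findRegisterUseOperandIdx(X86::EAX, TRI, /*isKill=*/true);
//   if (Idx != -1)
//     MI.getOperand(Idx).setIsKill(false);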
1099
1100/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1101/// indicating if this instruction reads or writes Reg. This also considers
1102/// partial defines.
1103std::pair<bool,bool>
1105 SmallVectorImpl<unsigned> *Ops) const {
1106 bool PartDef = false; // Partial redefine.
1107 bool FullDef = false; // Full define.
1108 bool Use = false;
1109
1110 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1111 const MachineOperand &MO = getOperand(i);
1112 if (!MO.isReg() || MO.getReg() != Reg)
1113 continue;
1114 if (Ops)
1115 Ops->push_back(i);
1116 if (MO.isUse())
1117 Use |= !MO.isUndef();
1118 else if (MO.getSubReg() && !MO.isUndef())
1119 // A partial def undef doesn't count as reading the register.
1120 PartDef = true;
1121 else
1122 FullDef = true;
1123 }
1124 // A partial redefine uses Reg unless there is also a full define.
1125 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1126}
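// Illustrative sketch (not part of the upstream file): for a sub-register
// write such as
//
//   %0.sub_32bit = COPY %1
//
// readsWritesVirtualRegister(%0) returns {true, true}: the partial redefine
// counts as a write and, because the other lanes are preserved, also as a
// read unless the def operand carries the undef flag.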
1127
1128/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1129/// the specified register or -1 if it is not found. If isDead is true, defs
1130/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1131/// also checks if there is a def of a super-register.
1133 const TargetRegisterInfo *TRI,
1134 bool isDead, bool Overlap) const {
1135 bool isPhys = Reg.isPhysical();
1136 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1137 const MachineOperand &MO = getOperand(i);
1138 // Accept regmask operands when Overlap is set.
1139 // Ignore them when looking for a specific def operand (Overlap == false).
1140 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1141 return i;
1142 if (!MO.isReg() || !MO.isDef())
1143 continue;
1144 Register MOReg = MO.getReg();
1145 bool Found = (MOReg == Reg);
1146 if (!Found && TRI && isPhys && MOReg.isPhysical()) {
1147 if (Overlap)
1148 Found = TRI->regsOverlap(MOReg, Reg);
1149 else
1150 Found = TRI->isSubRegister(MOReg, Reg);
1151 }
1152 if (Found && (!isDead || MO.isDead()))
1153 return i;
1154 }
1155 return -1;
1156}
1157
1158/// findFirstPredOperandIdx() - Find the index of the first operand in the
1159/// operand list that is used to represent the predicate. It returns -1 if
1160/// none is found.
1162 // Don't call MCID.findFirstPredOperandIdx() because this variant
1163 // is sometimes called on an instruction that's not yet complete, and
1164 // so the number of operands is less than the MCID indicates. In
1165 // particular, the PTX target does this.
1166 const MCInstrDesc &MCID = getDesc();
1167 if (MCID.isPredicable()) {
1168 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1169 if (MCID.operands()[i].isPredicate())
1170 return i;
1171 }
1172
1173 return -1;
1174}
1175
1176// MachineOperand::TiedTo is 4 bits wide.
1177const unsigned TiedMax = 15;
1178
1179/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1180///
1181/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1182/// field. TiedTo can have these values:
1183///
1184/// 0: Operand is not tied to anything.
1185/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1186/// TiedMax: Tied to an operand >= TiedMax-1.
1187///
1188/// The tied def must be one of the first TiedMax operands on a normal
1189/// instruction. INLINEASM instructions allow more tied defs.
1190///
1191void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1192 MachineOperand &DefMO = getOperand(DefIdx);
1193 MachineOperand &UseMO = getOperand(UseIdx);
1194 assert(DefMO.isDef() && "DefIdx must be a def operand");
1195 assert(UseMO.isUse() && "UseIdx must be a use operand");
1196 assert(!DefMO.isTied() && "Def is already tied to another use");
1197 assert(!UseMO.isTied() && "Use is already tied to another def");
1198
1199 if (DefIdx < TiedMax) {
1200 UseMO.TiedTo = DefIdx + 1;
1201 } else {
1202 // Inline asm can use the group descriptors to find tied operands,
1203 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1204 // but on normal instruction, the tied def must be within the first TiedMax
1205 // operands.
1206 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1207 "DefIdx out of range");
1208 UseMO.TiedTo = TiedMax;
1209 }
1210
1211 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1212 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1213}
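// Worked example of the encoding above (illustrative only): after
// tieOperands(/*DefIdx=*/0, /*UseIdx=*/2) on a normal two-address
// instruction,
//
//   getOperand(0).TiedTo == 3   // def, tied to operand 3 - 1 == 2
//   getOperand(2).TiedTo == 1   // use, tied to operand 1 - 1 == 0
//
// and findTiedOperandIdx() below simply inverts this mapping.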
1214
1215/// Given the index of a tied register operand, find the operand it is tied to.
1216/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1217/// which must exist.
1218unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1219 const MachineOperand &MO = getOperand(OpIdx);
1220 assert(MO.isTied() && "Operand isn't tied");
1221
1222 // Normally TiedTo is in range.
1223 if (MO.TiedTo < TiedMax)
1224 return MO.TiedTo - 1;
1225
1226 // Uses on normal instructions can be out of range.
1227 if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
1228 // Normal tied defs must be in the 0..TiedMax-1 range.
1229 if (MO.isUse())
1230 return TiedMax - 1;
1231 // MO is a def. Search for the tied use.
1232 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1233 const MachineOperand &UseMO = getOperand(i);
1234 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1235 return i;
1236 }
1237 llvm_unreachable("Can't find tied use");
1238 }
1239
1240 if (getOpcode() == TargetOpcode::STATEPOINT) {
1241 // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
1242 // on registers.
1243 StatepointOpers SO(this);
1244 unsigned CurUseIdx = SO.getFirstGCPtrIdx();
1245 assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
1246 unsigned NumDefs = getNumDefs();
1247 for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
1248 while (!getOperand(CurUseIdx).isReg())
1249 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1250 if (OpIdx == CurDefIdx)
1251 return CurUseIdx;
1252 if (OpIdx == CurUseIdx)
1253 return CurDefIdx;
1254 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1255 }
1256 llvm_unreachable("Can't find tied use");
1257 }
1258
1259 // Now deal with inline asm by parsing the operand group descriptor flags.
1260 // Find the beginning of each operand group.
1261 SmallVector<unsigned, 8> GroupIdx;
1262 unsigned OpIdxGroup = ~0u;
1263 unsigned NumOps;
1264 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1265 i += NumOps) {
1266 const MachineOperand &FlagMO = getOperand(i);
1267 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1268 unsigned CurGroup = GroupIdx.size();
1269 GroupIdx.push_back(i);
1270 const InlineAsm::Flag F(FlagMO.getImm());
1271 NumOps = 1 + F.getNumOperandRegisters();
1272 // OpIdx belongs to this operand group.
1273 if (OpIdx > i && OpIdx < i + NumOps)
1274 OpIdxGroup = CurGroup;
1275 unsigned TiedGroup;
1276 if (!F.isUseOperandTiedToDef(TiedGroup))
1277 continue;
1278 // Operands in this group are tied to operands in TiedGroup which must be
1279 // earlier. Find the number of operands between the two groups.
1280 unsigned Delta = i - GroupIdx[TiedGroup];
1281
1282 // OpIdx is a use tied to TiedGroup.
1283 if (OpIdxGroup == CurGroup)
1284 return OpIdx - Delta;
1285
1286 // OpIdx is a def tied to this use group.
1287 if (OpIdxGroup == TiedGroup)
1288 return OpIdx + Delta;
1289 }
1290 llvm_unreachable("Invalid tied operand on inline asm");
1291}
1292
1293/// clearKillInfo - Clears kill flags on all operands.
1294///
1296 for (MachineOperand &MO : operands()) {
1297 if (MO.isReg() && MO.isUse())
1298 MO.setIsKill(false);
1299 }
1300}
1301
1303 unsigned SubIdx,
1304 const TargetRegisterInfo &RegInfo) {
1305 if (ToReg.isPhysical()) {
1306 if (SubIdx)
1307 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1308 for (MachineOperand &MO : operands()) {
1309 if (!MO.isReg() || MO.getReg() != FromReg)
1310 continue;
1311 MO.substPhysReg(ToReg, RegInfo);
1312 }
1313 } else {
1314 for (MachineOperand &MO : operands()) {
1315 if (!MO.isReg() || MO.getReg() != FromReg)
1316 continue;
1317 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1318 }
1319 }
1320}
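// Illustrative sketch (not part of the upstream file): coalescer-style
// rewriting of every operand of MI that mentions OldReg (names assumed):
//
//   MI.substituteRegister(OldReg, NewReg, /*SubIdx=*/SubRegIdx, *TRI);
//
// For a physical NewReg the sub-register index is folded into the register
// itself via TRI.getSubReg(); for a virtual NewReg it is composed with any
// existing sub-register index on each operand (substVirtReg above).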
1321
1322/// isSafeToMove - Return true if it is safe to move this instruction. If
1323/// SawStore is set to true, it means that there is a store (or call) between
1324/// the instruction's location and its intended destination.
1325bool MachineInstr::isSafeToMove(bool &SawStore) const {
1326 // Ignore stuff that we obviously can't move.
1327 //
1328 // Treat volatile loads as stores. This is not strictly necessary for
1329 // volatiles, but it is required for atomic loads. It is not allowed to move
1330 // a load across an atomic load with Ordering > Monotonic.
1331 if (mayStore() || isCall() || isPHI() ||
1332 (mayLoad() && hasOrderedMemoryRef())) {
1333 SawStore = true;
1334 return false;
1335 }
1336
1337 // Don't touch instructions that have non-trivial invariants. For example,
1338 // terminators have to be at the end of a basic block.
1339 if (isPosition() || isDebugInstr() || isTerminator() ||
1341 return false;
1342
1343 // Don't touch instructions which can have non-load/store effects.
1344 //
1345 // Inline asm has a "sideeffect" marker to indicate whether the asm has
1346 // intentional side-effects. Even if an inline asm is not "sideeffect",
1347 // though, it still can't be speculatively executed: the operation might
1348 // not be valid on the current target, or for some combinations of operands.
1349 // (Some transforms that move an instruction don't speculatively execute it;
1350 // we currently don't try to handle that distinction here.)
1351 //
1352 // Other instructions handled here include those that can raise FP
1353 // exceptions, x86 "DIV" instructions which trap on divide by zero, and
1354 // stack adjustments.
1356 isInlineAsm())
1357 return false;
1358
1359 // See if this instruction does a load. If so, we have to guarantee that the
1360 // loaded value doesn't change between the load and its intended
1361 // destination. The check for isInvariantLoad gives the target the chance to
1362 // classify the load as always returning a constant, e.g. a constant pool
1363 // load.
1365 // Otherwise, this is a real load. If there is a store between the load and
1366 // end of block, we can't move it.
1367 return !SawStore;
1368
1369 return true;
1370}
1371
1373 // Don't delete frame allocation labels.
1374 // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
1375 if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
1376 return false;
1377
1378 // Don't delete FAKE_USE.
1379 // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
1380 if (isFakeUse())
1381 return false;
1382
1383 // LIFETIME markers should be preserved.
1384 // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
1385 if (isLifetimeMarker())
1386 return false;
1387
1388 // If we can move an instruction, we can remove it. Otherwise, it has
1389 // a side-effect of some sort.
1390 bool SawStore = false;
1391 return isPHI() || isSafeToMove(SawStore);
1392}
1393
1395 LiveRegUnits *LivePhysRegs) const {
1396 // Instructions without side-effects are dead iff they only define dead regs.
1397 // This function is hot and this loop returns early in the common case,
1398 // so only perform additional checks before this if absolutely necessary.
1399 for (const MachineOperand &MO : all_defs()) {
1400 Register Reg = MO.getReg();
1401 if (Reg.isPhysical()) {
1402 // Don't delete live physreg defs, or any reserved register defs.
1403 if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
1404 return false;
1405 } else {
1406 if (MO.isDead())
1407 continue;
1408 for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
1409 if (&Use != this)
1410 // This def has a non-debug use. Don't delete the instruction!
1411 return false;
1412 }
1413 }
1414 }
1415
1416 // Technically speaking inline asm without side effects and no defs can still
1417 // be deleted. But there is so much bad inline asm code out there, we should
1418 // let them be.
1419 if (isInlineAsm())
1420 return false;
1421
1422 // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
1423 if (isLifetimeMarker())
1424 return true;
1425
1426 // If there are no defs with uses, then we call the instruction dead so long
1427 // as we do not suspect it may have sideeffects.
1428 return wouldBeTriviallyDead();
1429}
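// Illustrative sketch (not part of the upstream file): dead-code-elimination
// style use, scanning a block bottom-up with a LiveRegUnits tracker (names
// assumed, simplified from DeadMachineInstructionElim):
//
//   LiveRegUnits LiveUnits(*TRI);
//   LiveUnits.addLiveOuts(MBB);
//   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
//     if (MI.isDead(MRI, &LiveUnits)) {
//       MI.eraseFromParent();
//       continue;
//     }
//     LiveUnits.stepBackward(MI);
//   }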
1430
1432 BatchAAResults *AA, bool UseTBAA,
1433 const MachineMemOperand *MMOa,
1434 const MachineMemOperand *MMOb) {
1435 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1436 // operates with MachineMemOperand offset with some important assumptions:
1437 // - LLVM fundamentally assumes flat address spaces.
1438 // - MachineOperand offset can *only* result from legalization and cannot
1439 // affect queries other than the trivial case of overlap checking.
1440 // - These offsets never wrap and never step outside of allocated objects.
1441 // - There should never be any negative offsets here.
1442 //
1443 // FIXME: Modify API to hide this math from "user"
1444 // Even before we go to AA we can reason locally about some memory objects. It
1445 // can save compile time, and possibly catch some corner cases not currently
1446 // covered.
1447
1448 int64_t OffsetA = MMOa->getOffset();
1449 int64_t OffsetB = MMOb->getOffset();
1450 int64_t MinOffset = std::min(OffsetA, OffsetB);
1451
1452 LocationSize WidthA = MMOa->getSize();
1453 LocationSize WidthB = MMOb->getSize();
1454 bool KnownWidthA = WidthA.hasValue();
1455 bool KnownWidthB = WidthB.hasValue();
1456 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1457
1458 const Value *ValA = MMOa->getValue();
1459 const Value *ValB = MMOb->getValue();
1460 bool SameVal = (ValA && ValB && (ValA == ValB));
1461 if (!SameVal) {
1462 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1463 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1464 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1465 return false;
1466 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1467 return false;
1468 if (PSVa && PSVb && (PSVa == PSVb))
1469 SameVal = true;
1470 }
1471
1472 if (SameVal && BothMMONonScalable) {
1473 if (!KnownWidthA || !KnownWidthB)
1474 return true;
1475 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1476 int64_t LowWidth = (MinOffset == OffsetA)
1477 ? WidthA.getValue().getKnownMinValue()
1478 : WidthB.getValue().getKnownMinValue();
1479 return (MinOffset + LowWidth > MaxOffset);
1480 }
1481
1482 if (!AA)
1483 return true;
1484
1485 if (!ValA || !ValB)
1486 return true;
1487
1488 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1489 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1490
1491 // If Scalable Location Size has non-zero offset, Width + Offset does not work
1492 // at the moment
1493 if ((WidthA.isScalable() && OffsetA > 0) ||
1494 (WidthB.isScalable() && OffsetB > 0))
1495 return true;
1496
1497 int64_t OverlapA =
1498 KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1500 int64_t OverlapB =
1501 KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1503
1504 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1505 ? WidthA
1506 : LocationSize::precise(OverlapA);
1507 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1508 ? WidthB
1509 : LocationSize::precise(OverlapB);
1510
1511 return !AA->isNoAlias(
1512 MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1513 MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1514}
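// Worked example of the same-value fast path above (illustrative only): two
// fixed-size accesses to the same IR value with OffsetA = 0, WidthA = 8 and
// OffsetB = 8, WidthB = 4 give
//
//   MinOffset = 0, MaxOffset = 8, LowWidth = 8
//   MinOffset + LowWidth > MaxOffset  =>  0 + 8 > 8  =>  false
//
// so the accesses are disjoint and the function reports "no alias" without
// consulting AA at all.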
1515
1517 bool UseTBAA) const {
1518 const MachineFunction *MF = getMF();
1520 const MachineFrameInfo &MFI = MF->getFrameInfo();
1521
1522 // Exclude call instructions, which may alter the memory but cannot be handled
1523 // by this function.
1524 if (isCall() || Other.isCall())
1525 return true;
1526
1527 // If neither instruction stores to memory, they can't alias in any
1528 // meaningful way, even if they read from the same address.
1529 if (!mayStore() && !Other.mayStore())
1530 return false;
1531
1532 // Both instructions must be memory operations to be able to alias.
1533 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1534 return false;
1535
1536 // Let the target decide if memory accesses cannot possibly overlap.
1538 return false;
1539
1540 // Memory operations without memory operands may access anything. Be
1541 // conservative and assume `MayAlias`.
1542 if (memoperands_empty() || Other.memoperands_empty())
1543 return true;
1544
1545 // Skip if there are too many memory operands.
1546 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1547 if (NumChecks > TII->getMemOperandAACheckLimit())
1548 return true;
1549
1550 // Check each pair of memory operands from both instructions, which can't
1551 // alias only if all pairs won't alias.
1552 for (auto *MMOa : memoperands())
1553 for (auto *MMOb : Other.memoperands())
1554 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1555 return true;
1556
1557 return false;
1558}
1559
1561 bool UseTBAA) const {
1562 if (AA) {
1563 BatchAAResults BAA(*AA);
1564 return mayAlias(&BAA, Other, UseTBAA);
1565 }
1566 return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1567}
1568
1569/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1570/// or volatile memory reference, or if the information describing the memory
1571/// reference is not available. Return false if it is known to have no ordered
1572/// memory references.
1574 // An instruction known never to access memory won't have a volatile access.
1575 if (!mayStore() &&
1576 !mayLoad() &&
1577 !isCall() &&
1579 return false;
1580
1581 // Otherwise, if the instruction has no memory reference information,
1582 // conservatively assume it wasn't preserved.
1583 if (memoperands_empty())
1584 return true;
1585
1586 // Check if any of our memory operands are ordered.
1587 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1588 return !MMO->isUnordered();
1589 });
1590}
1591
1592/// isDereferenceableInvariantLoad - Return true if this instruction will never
1593/// trap and is loading from a location whose value is invariant across a run of
1594/// this function.
1596 // If the instruction doesn't load at all, it isn't an invariant load.
1597 if (!mayLoad())
1598 return false;
1599
1600 // If the instruction has lost its memoperands, conservatively assume that
1601 // it may not be an invariant load.
1602 if (memoperands_empty())
1603 return false;
1604
1605 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1606
1607 for (MachineMemOperand *MMO : memoperands()) {
1608 if (!MMO->isUnordered())
1609 // If the memory operand has ordering side effects, we can't move the
1610 // instruction. Such an instruction is technically an invariant load,
1611 // but the caller code would need to be updated to expect that.
1612 return false;
1613 if (MMO->isStore()) return false;
1614 if (MMO->isInvariant() && MMO->isDereferenceable())
1615 continue;
1616
1617 // A load from a constant PseudoSourceValue is invariant.
1618 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1619 if (PSV->isConstant(&MFI))
1620 continue;
1621 }
1622
1623 // Otherwise assume conservatively.
1624 return false;
1625 }
1626
1627 // Everything checks out.
1628 return true;
1629}
1630
1632 if (!isPHI())
1633 return {};
1634 assert(getNumOperands() >= 3 &&
1635 "It's illegal to have a PHI without source operands");
1636
1637 Register Reg = getOperand(1).getReg();
1638 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1639 if (getOperand(i).getReg() != Reg)
1640 return {};
1641 return Reg;
1642}
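// Illustrative sketch (not part of the upstream file): for
//
//   %5:gr32 = PHI %2, %bb.1, %2, %bb.2
//
// isConstantValuePHI() returns %2 because every incoming value is the same
// register; a PHI whose incoming registers differ yields an invalid (empty)
// Register instead.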
1643
1646 return true;
1647 if (isInlineAsm()) {
1648 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1649 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1650 return true;
1651 }
1652
1653 return false;
1654}
1655
1657 return mayStore() || isCall() ||
1659}
1660
1661/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1662///
1664 for (const MachineOperand &MO : operands()) {
1665 if (!MO.isReg() || MO.isUse())
1666 continue;
1667 if (!MO.isDead())
1668 return false;
1669 }
1670 return true;
1671}
1672
1674 for (const MachineOperand &MO : implicit_operands()) {
1675 if (!MO.isReg() || MO.isUse())
1676 continue;
1677 if (!MO.isDead())
1678 return false;
1679 }
1680 return true;
1681}
1682
1683/// copyImplicitOps - Copy implicit register operands from specified
1684/// instruction to this instruction.
1686 const MachineInstr &MI) {
1687 for (const MachineOperand &MO :
1688 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1689 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1690 addOperand(MF, MO);
1691}
1692
1694 const MCInstrDesc &MCID = getDesc();
1695 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1696 return true;
1697 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1698 const auto &Operand = getOperand(I);
1699 if (!Operand.isReg() || Operand.isDef())
1700 // Ignore the defined registers as MCID marks only the uses as tied.
1701 continue;
1702 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1703 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1704 if (ExpectedTiedIdx != TiedIdx)
1705 return true;
1706 }
1707 return false;
1708}
1709
1711 const MachineRegisterInfo &MRI) const {
1713 if (!Op.isReg())
1714 return LLT{};
1715
1717 return MRI.getType(Op.getReg());
1718
1719 auto &OpInfo = getDesc().operands()[OpIdx];
1720 if (!OpInfo.isGenericType())
1721 return MRI.getType(Op.getReg());
1722
1723 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1724 return LLT{};
1725
1726 LLT TypeToPrint = MRI.getType(Op.getReg());
1727 // Don't mark the type index printed if it wasn't actually printed: maybe
1728 // another operand with the same type index has an actual type attached:
1729 if (TypeToPrint.isValid())
1730 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1731 return TypeToPrint;
1732}
1733
1734#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1736 dbgs() << " ";
1737 print(dbgs());
1738}
1739
1740LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1741 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1742 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1743 if (Depth >= MaxDepth)
1744 return;
1745 if (!AlreadySeenInstrs.insert(this).second)
1746 return;
1747 // PadToColumn always inserts at least one space.
1748 // Don't mess up the alignment if we don't want any space.
1749 if (Depth)
1750 fdbgs().PadToColumn(Depth * 2);
1751 print(fdbgs());
1752 for (const MachineOperand &MO : operands()) {
1753 if (!MO.isReg() || MO.isDef())
1754 continue;
1755 Register Reg = MO.getReg();
1756 if (Reg.isPhysical())
1757 continue;
1758 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1759 if (NewMI == nullptr)
1760 continue;
1761 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1762 }
1763}
1764
1766 unsigned MaxDepth) const {
1767 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1768 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1769}
1770#endif
1771
1772void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1773 bool SkipDebugLoc, bool AddNewLine,
1774 const TargetInstrInfo *TII) const {
1775 const Module *M = nullptr;
1776 const Function *F = nullptr;
1777 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1778 F = &MF->getFunction();
1779 M = F->getParent();
1780 if (!TII)
1781 TII = MF->getSubtarget().getInstrInfo();
1782 }
1783
1784 ModuleSlotTracker MST(M);
1785 if (F)
1786 MST.incorporateFunction(*F);
1787 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1788}
1789
1791 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1792 bool AddNewLine, const TargetInstrInfo *TII) const {
1793 // We can be a bit tidier if we know the MachineFunction.
1794 const TargetRegisterInfo *TRI = nullptr;
1795 const MachineRegisterInfo *MRI = nullptr;
1796 tryToGetTargetInfo(*this, TRI, MRI, TII);
1797
1798 if (isCFIInstruction())
1799 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1800
1801 SmallBitVector PrintedTypes(8);
1802 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1803 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1804 if (!ShouldPrintRegisterTies)
1805 return 0U;
1806 const MachineOperand &MO = getOperand(OpIdx);
1807 if (MO.isReg() && MO.isTied() && !MO.isDef())
1808 return findTiedOperandIdx(OpIdx);
1809 return 0U;
1810 };
1811 unsigned StartOp = 0;
1812 unsigned e = getNumOperands();
1813
1814 // Print explicitly defined operands on the left of an assignment syntax.
1815 while (StartOp < e) {
1816 const MachineOperand &MO = getOperand(StartOp);
1817 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1818 break;
1819
1820 if (StartOp != 0)
1821 OS << ", ";
1822
1823 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1824 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1825 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1826 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1827 ++StartOp;
1828 }
1829
1830 if (StartOp != 0)
1831 OS << " = ";
1832
1834 OS << "frame-setup ";
1836 OS << "frame-destroy ";
1838 OS << "nnan ";
1840 OS << "ninf ";
1842 OS << "nsz ";
1844 OS << "arcp ";
1846 OS << "contract ";
1848 OS << "afn ";
1850 OS << "reassoc ";
1852 OS << "nuw ";
1854 OS << "nsw ";
1856 OS << "exact ";
1858 OS << "nofpexcept ";
1860 OS << "nomerge ";
1862 OS << "nneg ";
1864 OS << "disjoint ";
1866 OS << "nusw ";
1868 OS << "samesign ";
1870 OS << "inbounds ";
1871
1872 // Print the opcode name.
1873 if (TII)
1874 OS << TII->getName(getOpcode());
1875 else
1876 OS << "UNKNOWN";
1877
1878 if (SkipOpers)
1879 return;
1880
1881 // Print the rest of the operands.
1882 bool FirstOp = true;
1883 unsigned AsmDescOp = ~0u;
1884 unsigned AsmOpCount = 0;
1885
1886 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1887 // Print asm string.
1888 OS << " ";
1889 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1890 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1891 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1892 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
1893 IsStandalone, ShouldPrintRegisterTies,
1894 TiedOperandIdx, TRI);
1895
1896 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1897 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1898 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1899 OS << " [sideeffect]";
1900 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1901 OS << " [mayload]";
1902 if (ExtraInfo & InlineAsm::Extra_MayStore)
1903 OS << " [maystore]";
1904 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1905 OS << " [isconvergent]";
1906 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1907 OS << " [alignstack]";
1908 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1909 OS << " [attdialect]";
1910 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1911 OS << " [inteldialect]";
1912
1913 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1914 FirstOp = false;
1915 }
1916
1917 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1918 const MachineOperand &MO = getOperand(i);
1919
1920 if (FirstOp) FirstOp = false; else OS << ",";
1921 OS << " ";
1922
1923 if (isDebugValueLike() && MO.isMetadata()) {
1924 // Pretty print DBG_VALUE* instructions.
1925 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1926 if (DIV && !DIV->getName().empty())
1927 OS << "!\"" << DIV->getName() << '\"';
1928 else {
1929 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1930 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1931 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1932 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1933 }
1934 } else if (isDebugLabel() && MO.isMetadata()) {
1935 // Pretty print DBG_LABEL instructions.
1936 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1937 if (DIL && !DIL->getName().empty())
1938 OS << "\"" << DIL->getName() << '\"';
1939 else {
1940 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1941 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1942 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1943 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1944 }
1945 } else if (i == AsmDescOp && MO.isImm()) {
1946 // Pretty print the inline asm operand descriptor.
1947 OS << '$' << AsmOpCount++;
1948 unsigned Flag = MO.getImm();
1949 const InlineAsm::Flag F(Flag);
1950 OS << ":[";
1951 OS << F.getKindName();
1952
1953 unsigned RCID;
1954 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1955 if (TRI) {
1956 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1957 } else
1958 OS << ":RC" << RCID;
1959 }
1960
1961 if (F.isMemKind()) {
1962 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1963 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1964 }
1965
1966 unsigned TiedTo;
1967 if (F.isUseOperandTiedToDef(TiedTo))
1968 OS << " tiedto:$" << TiedTo;
1969
1970 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1971 F.isRegUseKind()) &&
1972 F.getRegMayBeFolded()) {
1973 OS << " foldable";
1974 }
1975
1976 OS << ']';
1977
1978 // Compute the index of the next operand descriptor.
1979 AsmDescOp += 1 + F.getNumOperandRegisters();
1980 } else {
1981 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1982 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1983 if (MO.isImm() && isOperandSubregIdx(i))
1984 MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1985 else
1986 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1987 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1988 }
1989 }
1990
1991 // Print any optional symbols attached to this instruction as-if they were
1992 // operands.
1993 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1994 if (!FirstOp) {
1995 FirstOp = false;
1996 OS << ',';
1997 }
1998 OS << " pre-instr-symbol ";
1999 MachineOperand::printSymbol(OS, *PreInstrSymbol);
2000 }
2001 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
2002 if (!FirstOp) {
2003 FirstOp = false;
2004 OS << ',';
2005 }
2006 OS << " post-instr-symbol ";
2007 MachineOperand::printSymbol(OS, *PostInstrSymbol);
2008 }
2009 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
2010 if (!FirstOp) {
2011 FirstOp = false;
2012 OS << ',';
2013 }
2014 OS << " heap-alloc-marker ";
2015 HeapAllocMarker->printAsOperand(OS, MST);
2016 }
2017 if (MDNode *PCSections = getPCSections()) {
2018 if (!FirstOp) {
2019 FirstOp = false;
2020 OS << ',';
2021 }
2022 OS << " pcsections ";
2023 PCSections->printAsOperand(OS, MST);
2024 }
2025 if (MDNode *MMRA = getMMRAMetadata()) {
2026 if (!FirstOp) {
2027 FirstOp = false;
2028 OS << ',';
2029 }
2030 OS << " mmra ";
2031 MMRA->printAsOperand(OS, MST);
2032 }
2033 if (uint32_t CFIType = getCFIType()) {
2034 if (!FirstOp)
2035 OS << ',';
2036 OS << " cfi-type " << CFIType;
2037 }
2038
2039 if (DebugInstrNum) {
2040 if (!FirstOp)
2041 OS << ",";
2042 OS << " debug-instr-number " << DebugInstrNum;
2043 }
2044
2045 if (!SkipDebugLoc) {
2046 if (const DebugLoc &DL = getDebugLoc()) {
2047 if (!FirstOp)
2048 OS << ',';
2049 OS << " debug-location ";
2050 DL->printAsOperand(OS, MST);
2051 }
2052 }
2053
2054 if (!memoperands_empty()) {
2055 SmallVector<StringRef, 0> SSNs;
2056 const LLVMContext *Context = nullptr;
2057 std::unique_ptr<LLVMContext> CtxPtr;
2058 const MachineFrameInfo *MFI = nullptr;
2059 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
2060 MFI = &MF->getFrameInfo();
2061 Context = &MF->getFunction().getContext();
2062 } else {
2063 CtxPtr = std::make_unique<LLVMContext>();
2064 Context = CtxPtr.get();
2065 }
2066
2067 OS << " :: ";
2068 bool NeedComma = false;
2069 for (const MachineMemOperand *Op : memoperands()) {
2070 if (NeedComma)
2071 OS << ", ";
2072 Op->print(OS, MST, SSNs, *Context, MFI, TII);
2073 NeedComma = true;
2074 }
2075 }
2076
2077 if (SkipDebugLoc)
2078 return;
2079
2080 bool HaveSemi = false;
2081
2082 // Print debug location information.
2083 if (const DebugLoc &DL = getDebugLoc()) {
2084 if (!HaveSemi) {
2085 OS << ';';
2086 HaveSemi = true;
2087 }
2088 OS << ' ';
2089 DL.print(OS);
2090 }
2091
2092 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2093 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2094 (isDebugValueList() && getNumOperands() >= 2) ||
2095 (isDebugRef() && getNumOperands() >= 3)) {
2096 if (getDebugVariableOp().isMetadata()) {
2097 if (!HaveSemi) {
2098 OS << ";";
2099 HaveSemi = true;
2100 }
2101 auto *DV = getDebugVariable();
2102 OS << " line no:" << DV->getLine();
2103 if (isIndirectDebugValue())
2104 OS << " indirect";
2105 }
2106 }
2107 // TODO: DBG_LABEL
2108
2109 if (PrintMIAddrs)
2110 OS << " ; " << this;
2111
2112 if (AddNewLine)
2113 OS << '\n';
2114}
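
As a usage sketch (not part of this file), a pass can render an instruction in MIR syntax through the raw_ostream overload above; the helper name below is illustrative:

// Sketch: capture the textual (MIR) form of an instruction in a std::string.
#include <string>
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/raw_ostream.h"

static std::string toString(const llvm::MachineInstr &MI) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  MI.print(OS, /*IsStandalone=*/true, /*SkipOpers=*/false,
           /*SkipDebugLoc=*/false, /*AddNewLine=*/false);
  return Buf;
}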
2115
2116bool MachineInstr::addRegisterKilled(Register IncomingReg,
2117 const TargetRegisterInfo *RegInfo,
2118 bool AddIfNotFound) {
2119 bool isPhysReg = IncomingReg.isPhysical();
2120 bool hasAliases = isPhysReg &&
2121 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2122 bool Found = false;
2123 SmallVector<unsigned, 4> DeadOps;
2124 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2125 MachineOperand &MO = getOperand(i);
2126 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2127 continue;
2128
2129 // DEBUG_VALUE nodes do not contribute to code generation and should
2130 // always be ignored. Failure to do so may result in trying to modify
2131 // KILL flags on DEBUG_VALUE nodes.
2132 if (MO.isDebug())
2133 continue;
2134
2135 Register Reg = MO.getReg();
2136 if (!Reg)
2137 continue;
2138
2139 if (Reg == IncomingReg) {
2140 if (!Found) {
2141 if (MO.isKill())
2142 // The register is already marked kill.
2143 return true;
2144 if (isPhysReg && isRegTiedToDefOperand(i))
2145 // Two-address uses of physregs must not be marked kill.
2146 return true;
2147 MO.setIsKill();
2148 Found = true;
2149 }
2150 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2151 // A super-register kill already exists.
2152 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2153 return true;
2154 if (RegInfo->isSubRegister(IncomingReg, Reg))
2155 DeadOps.push_back(i);
2156 }
2157 }
2158
2159 // Trim unneeded kill operands.
2160 while (!DeadOps.empty()) {
2161 unsigned OpIdx = DeadOps.back();
2162 if (getOperand(OpIdx).isImplicit() &&
2163 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2164 removeOperand(OpIdx);
2165 else
2166 getOperand(OpIdx).setIsKill(false);
2167 DeadOps.pop_back();
2168 }
2169
2170 // If not found, this means an alias of one of the operands is killed. Add a
2171 // new implicit operand if required.
2172 if (!Found && AddIfNotFound) {
2173 addOperand(MachineOperand::CreateReg(IncomingReg,
2174 false /*IsDef*/,
2175 true /*IsImp*/,
2176 true /*IsKill*/));
2177 return true;
2178 }
2179 return Found;
2180}
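
A hedged sketch of how a pass might pair addRegisterKilled with clearRegisterKills when it moves the last use of a register; the helper below is illustrative only:

// Sketch: after sinking the last use of Reg from OldMI's position to NewMI,
// move the kill flag as well so liveness information stays accurate.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

static void moveKill(llvm::MachineInstr &OldMI, llvm::MachineInstr &NewMI,
                     llvm::Register Reg, const llvm::TargetRegisterInfo *TRI) {
  OldMI.clearRegisterKills(Reg, TRI);                        // drop stale kill
  NewMI.addRegisterKilled(Reg, TRI, /*AddIfNotFound=*/true); // mark new kill
}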
2181
2182void MachineInstr::clearRegisterKills(Register Reg,
2183 const TargetRegisterInfo *RegInfo) {
2184 if (!Reg.isPhysical())
2185 RegInfo = nullptr;
2186 for (MachineOperand &MO : operands()) {
2187 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2188 continue;
2189 Register OpReg = MO.getReg();
2190 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2191 MO.setIsKill(false);
2192 }
2193}
2194
2195bool MachineInstr::addRegisterDead(Register Reg,
2196 const TargetRegisterInfo *RegInfo,
2197 bool AddIfNotFound) {
2198 bool isPhysReg = Reg.isPhysical();
2199 bool hasAliases = isPhysReg &&
2200 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2201 bool Found = false;
2202 SmallVector<unsigned, 4> DeadOps;
2203 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2204 MachineOperand &MO = getOperand(i);
2205 if (!MO.isReg() || !MO.isDef())
2206 continue;
2207 Register MOReg = MO.getReg();
2208 if (!MOReg)
2209 continue;
2210
2211 if (MOReg == Reg) {
2212 MO.setIsDead();
2213 Found = true;
2214 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2215 // There exists a super-register that's marked dead.
2216 if (RegInfo->isSuperRegister(Reg, MOReg))
2217 return true;
2218 if (RegInfo->isSubRegister(Reg, MOReg))
2219 DeadOps.push_back(i);
2220 }
2221 }
2222
2223 // Trim unneeded dead operands.
2224 while (!DeadOps.empty()) {
2225 unsigned OpIdx = DeadOps.back();
2226 if (getOperand(OpIdx).isImplicit() &&
2227 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2228 removeOperand(OpIdx);
2229 else
2230 getOperand(OpIdx).setIsDead(false);
2231 DeadOps.pop_back();
2232 }
2233
2234 // If not found, this means an alias of one of the operands is dead. Add a
2235 // new implicit operand if required.
2236 if (Found || !AddIfNotFound)
2237 return Found;
2238
2239 addOperand(MachineOperand::CreateReg(Reg,
2240 true /*IsDef*/,
2241 true /*IsImp*/,
2242 false /*IsKill*/,
2243 true /*IsDead*/));
2244 return true;
2245}
2246
2247void MachineInstr::clearRegisterDeads(Register Reg) {
2248 for (MachineOperand &MO : all_defs())
2249 if (MO.getReg() == Reg)
2250 MO.setIsDead(false);
2251}
2252
2253void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2254 for (MachineOperand &MO : all_defs())
2255 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2256 MO.setIsUndef(IsUndef);
2257}
2258
2259void MachineInstr::addRegisterDefined(Register Reg,
2260 const TargetRegisterInfo *RegInfo) {
2261 if (Reg.isPhysical()) {
2262 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2263 if (MO)
2264 return;
2265 } else {
2266 for (const MachineOperand &MO : all_defs()) {
2267 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2268 return;
2269 }
2270 }
2271 addOperand(MachineOperand::CreateReg(Reg,
2272 true /*IsDef*/,
2273 true /*IsImp*/));
2274}
2275
2276void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2277 const TargetRegisterInfo &TRI) {
2278 bool HasRegMask = false;
2279 for (MachineOperand &MO : operands()) {
2280 if (MO.isRegMask()) {
2281 HasRegMask = true;
2282 continue;
2283 }
2284 if (!MO.isReg() || !MO.isDef()) continue;
2285 Register Reg = MO.getReg();
2286 if (!Reg.isPhysical())
2287 continue;
2288 // If there are no uses, including partial uses, the def is dead.
2289 if (llvm::none_of(UsedRegs,
2290 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2291 MO.setIsDead();
2292 }
2293
2294 // This is a call with a register mask operand.
2295 // Mask clobbers are always dead, so add defs for the non-dead defines.
2296 if (HasRegMask)
2297 for (const Register &UsedReg : UsedRegs)
2298 addRegisterDefined(UsedReg, &TRI);
2299}
2300
2301unsigned
2302MachineInstrExpressionTrait::getHashValue(const MachineInstr *const &MI) {
2303 // Build up a buffer of hash code components.
2304 SmallVector<size_t, 16> HashComponents;
2305 HashComponents.reserve(MI->getNumOperands() + 1);
2306 HashComponents.push_back(MI->getOpcode());
2307 for (const MachineOperand &MO : MI->operands()) {
2308 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2309 continue; // Skip virtual register defs.
2310
2311 HashComponents.push_back(hash_value(MO));
2312 }
2313 return hash_combine_range(HashComponents);
2314}
2315
2316const MDNode *MachineInstr::getLocCookieMD() const {
2317 // Find the source location cookie.
2318 const MDNode *LocMD = nullptr;
2319 for (unsigned i = getNumOperands(); i != 0; --i) {
2320 if (getOperand(i-1).isMetadata() &&
2321 (LocMD = getOperand(i-1).getMetadata()) &&
2322 LocMD->getNumOperands() != 0) {
2323 if (mdconst::hasa<ConstantInt>(LocMD->getOperand(0)))
2324 return LocMD;
2325 }
2326 }
2327
2328 return nullptr;
2329}
2330
2331void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2332 assert(isInlineAsm() && "Expected an inline asm instruction");
2333 const MDNode *LocMD = getLocCookieMD();
2334 uint64_t LocCookie =
2335 LocMD
2336 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2337 : 0;
2338 LLVMContext &Ctx = getMF()->getFunction().getContext();
2339 Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2340}
2341
2342void MachineInstr::emitGenericError(const Twine &ErrMsg) const {
2343 const Function &Fn = getMF()->getFunction();
2344 Fn.getContext().diagnose(
2345 DiagnosticInfoGenericWithLoc(ErrMsg, Fn, getDebugLoc()));
2346}
2347
2348MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2349 const MCInstrDesc &MCID, bool IsIndirect,
2350 Register Reg, const MDNode *Variable,
2351 const MDNode *Expr) {
2352 assert(isa<DILocalVariable>(Variable) && "not a variable");
2353 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2354 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2355 "Expected inlined-at fields to agree");
2356 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2357 if (IsIndirect)
2358 MIB.addImm(0U);
2359 else
2360 MIB.addReg(0U);
2361 return MIB.addMetadata(Variable).addMetadata(Expr);
2362}
2363
2364MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2365 const MCInstrDesc &MCID, bool IsIndirect,
2366 ArrayRef<MachineOperand> DebugOps,
2367 const MDNode *Variable, const MDNode *Expr) {
2368 assert(isa<DILocalVariable>(Variable) && "not a variable");
2369 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2370 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2371 "Expected inlined-at fields to agree");
2372 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2373 assert(DebugOps.size() == 1 &&
2374 "DBG_VALUE must contain exactly one debug operand");
2375 MachineOperand DebugOp = DebugOps[0];
2376 if (DebugOp.isReg())
2377 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2378 Expr);
2379
2380 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2381 if (IsIndirect)
2382 MIB.addImm(0U);
2383 else
2384 MIB.addReg(0U);
2385 return MIB.addMetadata(Variable).addMetadata(Expr);
2386 }
2387
2388 auto MIB = BuildMI(MF, DL, MCID);
2389 MIB.addMetadata(Variable).addMetadata(Expr);
2390 for (const MachineOperand &DebugOp : DebugOps)
2391 if (DebugOp.isReg())
2392 MIB.addReg(DebugOp.getReg());
2393 else
2394 MIB.add(DebugOp);
2395 return MIB;
2396}
2397
2398MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2399 MachineBasicBlock::iterator I,
2400 const DebugLoc &DL, const MCInstrDesc &MCID,
2401 bool IsIndirect, Register Reg,
2402 const MDNode *Variable, const MDNode *Expr) {
2403 MachineFunction &MF = *BB.getParent();
2404 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2405 BB.insert(I, MI);
2406 return MachineInstrBuilder(MF, MI);
2407}
2408
2409MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2410 MachineBasicBlock::iterator I,
2411 const DebugLoc &DL, const MCInstrDesc &MCID,
2412 bool IsIndirect,
2413 ArrayRef<MachineOperand> DebugOps,
2414 const MDNode *Variable, const MDNode *Expr) {
2415 MachineFunction &MF = *BB.getParent();
2416 MachineInstr *MI =
2417 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2418 BB.insert(I, MI);
2419 return MachineInstrBuilder(MF, *MI);
2420}
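
These BuildMI overloads are the usual way a pass materializes a DBG_VALUE. A minimal sketch, assuming Var, Expr and DL were taken from an existing debug instruction so the inlined-at scopes agree (the helper name is illustrative, not part of LLVM):

// Sketch: describe variable Var as living in register Reg, inserted right
// before InsertPt. IsIndirect=false means the value is in Reg itself, not at
// an address stored in Reg.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"

static void emitDbgValueForReg(llvm::MachineBasicBlock &MBB,
                               llvm::MachineBasicBlock::iterator InsertPt,
                               const llvm::TargetInstrInfo &TII,
                               llvm::Register Reg,
                               const llvm::DILocalVariable *Var,
                               const llvm::DIExpression *Expr,
                               const llvm::DebugLoc &DL) {
  llvm::BuildMI(MBB, InsertPt, DL, TII.get(llvm::TargetOpcode::DBG_VALUE),
                /*IsIndirect=*/false, Reg, Var, Expr);
}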
2421
2422/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2423/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2424static const DIExpression *computeExprForSpill(
2425 const MachineInstr &MI,
2426 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2427 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2428 "Expected inlined-at fields to agree");
2429
2430 const DIExpression *Expr = MI.getDebugExpression();
2431 if (MI.isIndirectDebugValue()) {
2432 assert(MI.getDebugOffset().getImm() == 0 &&
2433 "DBG_VALUE with nonzero offset");
2434 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2435 } else if (MI.isDebugValueList()) {
2436 // We will replace the spilled register with a frame index, so
2437 // immediately deref all references to the spilled register.
2438 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2439 for (const MachineOperand *Op : SpilledOperands) {
2440 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2441 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2442 }
2443 }
2444 return Expr;
2445}
2446static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2447 Register SpillReg) {
2448 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2449 SmallVector<const MachineOperand *> SpillOperands(
2450 llvm::make_pointer_range(MI.getDebugOperandsForReg(SpillReg)));
2451 return computeExprForSpill(MI, SpillOperands);
2452}
2453
2454MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2455 MachineBasicBlock::iterator I,
2456 const MachineInstr &Orig,
2457 int FrameIndex, Register SpillReg) {
2458 assert(!Orig.isDebugRef() &&
2459 "DBG_INSTR_REF should not reference a virtual register.");
2460 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2461 MachineInstrBuilder NewMI =
2462 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2463 // Non-Variadic Operands: Location, Offset, Variable, Expression
2464 // Variadic Operands: Variable, Expression, Locations...
2465 if (Orig.isNonListDebugValue())
2466 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2467 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2468 if (Orig.isDebugValueList()) {
2469 for (const MachineOperand &Op : Orig.debug_operands())
2470 if (Op.isReg() && Op.getReg() == SpillReg)
2471 NewMI.addFrameIndex(FrameIndex);
2472 else
2473 NewMI.add(MachineOperand(Op));
2474 }
2475 return NewMI;
2476}
2477MachineInstr *llvm::buildDbgValueForSpill(
2478 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2479 const MachineInstr &Orig, int FrameIndex,
2480 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2481 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2482 MachineInstrBuilder NewMI =
2483 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2484 // Non-Variadic Operands: Location, Offset, Variable, Expression
2485 // Variadic Operands: Variable, Expression, Locations...
2486 if (Orig.isNonListDebugValue())
2487 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2488 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2489 if (Orig.isDebugValueList()) {
2490 for (const MachineOperand &Op : Orig.debug_operands())
2491 if (is_contained(SpilledOperands, &Op))
2492 NewMI.addFrameIndex(FrameIndex);
2493 else
2494 NewMI.add(MachineOperand(Op));
2495 }
2496 return NewMI;
2497}
2498
2499void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2500 Register Reg) {
2501 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2502 if (Orig.isNonListDebugValue())
2503 Orig.getDebugOffset().ChangeToImmediate(0U);
2504 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2505 Op.ChangeToFrameIndex(FrameIndex);
2506 Orig.getDebugExpressionOp().setMetadata(Expr);
2507}
2508
2509void MachineInstr::collectDebugValues(
2510 SmallVectorImpl<MachineInstr *> &DbgValues) {
2511 MachineInstr &MI = *this;
2512 if (!MI.getOperand(0).isReg())
2513 return;
2514
2515 MachineBasicBlock::iterator DI = MI; ++DI;
2516 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2517 DI != DE; ++DI) {
2518 if (!DI->isDebugValue())
2519 return;
2520 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2521 DbgValues.push_back(&*DI);
2522 }
2523}
2524
2525void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2526 // Collect matching debug values.
2527 SmallVector<MachineInstr *, 2> DbgValues;
2528
2529 if (!getOperand(0).isReg())
2530 return;
2531
2532 Register DefReg = getOperand(0).getReg();
2533 auto *MRI = getRegInfo();
2534 for (auto &MO : MRI->use_operands(DefReg)) {
2535 auto *DI = MO.getParent();
2536 if (!DI->isDebugValue())
2537 continue;
2538 if (DI->hasDebugOperandForReg(DefReg)) {
2539 DbgValues.push_back(DI);
2540 }
2541 }
2542
2543 // Propagate Reg to debug value instructions.
2544 for (auto *DBI : DbgValues)
2545 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2546 Op.setReg(Reg);
2547}
2548
2549using MMOList = SmallVector<const MachineMemOperand *, 2>;
2550
2551static LocationSize getSpillSlotSize(const MMOList &Accesses,
2552 const MachineFrameInfo &MFI) {
2553 std::optional<TypeSize> Size;
2554 for (const auto *A : Accesses) {
2555 if (MFI.isSpillSlotObjectIndex(
2556 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2557 ->getFrameIndex())) {
2558 LocationSize S = A->getSize();
2559 if (!S.hasValue())
2560 return LocationSize::beforeOrAfterPointer();
2561 if (!Size)
2562 Size = S.getValue();
2563 else
2564 Size = *Size + S.getValue();
2565 }
2566 }
2567 if (!Size)
2568 return LocationSize::precise(0);
2569 return LocationSize::precise(*Size);
2570}
2571
2572std::optional<LocationSize>
2573MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2574 int FI;
2575 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2576 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2577 if (MFI.isSpillSlotObjectIndex(FI))
2578 return (*memoperands_begin())->getSize();
2579 }
2580 return std::nullopt;
2581}
2582
2583std::optional<LocationSize>
2584MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2585 SmallVector<const MachineMemOperand *, 4> Accesses;
2586 if (TII->hasStoreToStackSlot(*this, Accesses))
2587 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2588 return std::nullopt;
2589}
2590
2591std::optional<LocationSize>
2592MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2593 int FI;
2594 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2595 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2596 if (MFI.isSpillSlotObjectIndex(FI))
2597 return (*memoperands_begin())->getSize();
2598 }
2599 return std::nullopt;
2600}
2601
2602std::optional<LocationSize>
2603MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2604 SmallVector<const MachineMemOperand *, 4> Accesses;
2605 if (TII->hasLoadFromStackSlot(*this, Accesses))
2606 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2607 return std::nullopt;
2608}
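
A usage sketch of the four spill/restore queries above, as a pass or printer might classify an instruction for a diagnostic; the helper, its name, and the wording are illustrative:

// Sketch: report whether MI is a (possibly folded) spill or reload and how
// many bytes it moves. Scalable sizes are reported by their known-min value.
#include <optional>
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/raw_ostream.h"

static void describeStackAccess(const llvm::MachineInstr &MI,
                                const llvm::TargetInstrInfo *TII) {
  std::optional<llvm::LocationSize> Sz;
  const char *Kind = nullptr;
  if ((Sz = MI.getSpillSize(TII)))
    Kind = "spill";
  else if ((Sz = MI.getFoldedSpillSize(TII)))
    Kind = "folded spill";
  else if ((Sz = MI.getRestoreSize(TII)))
    Kind = "reload";
  else if ((Sz = MI.getFoldedRestoreSize(TII)))
    Kind = "folded reload";
  if (!Kind)
    return;
  llvm::errs() << Kind;
  if (Sz->hasValue()) // the size may be unknown (before/after pointer)
    llvm::errs() << " of " << Sz->getValue().getKnownMinValue() << " bytes";
  llvm::errs() << "\n";
}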
2609
2610unsigned MachineInstr::getDebugInstrNum() {
2611 if (DebugInstrNum == 0)
2612 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2613 return DebugInstrNum;
2614}
2615
2616unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2617 if (DebugInstrNum == 0)
2618 DebugInstrNum = MF.getNewDebugInstrNum();
2619 return DebugInstrNum;
2620}
2621
2622std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2623 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2624 getRegInfo()->getType(getOperand(1).getReg()));
2625}
2626
2627std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2628 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2629 getRegInfo()->getType(getOperand(1).getReg()),
2630 getRegInfo()->getType(getOperand(2).getReg()));
2631}
2632
2633std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2634 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2635 getRegInfo()->getType(getOperand(1).getReg()),
2636 getRegInfo()->getType(getOperand(2).getReg()),
2637 getRegInfo()->getType(getOperand(3).getReg()));
2638}
2639
2640std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2641 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2642 getRegInfo()->getType(getOperand(1).getReg()),
2643 getRegInfo()->getType(getOperand(2).getReg()),
2644 getRegInfo()->getType(getOperand(3).getReg()),
2645 getRegInfo()->getType(getOperand(4).getReg()));
2646}
2647
2648std::tuple<Register, LLT, Register, LLT>
2649MachineInstr::getFirst2RegLLTs() const {
2650 Register Reg0 = getOperand(0).getReg();
2651 Register Reg1 = getOperand(1).getReg();
2652 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2653 getRegInfo()->getType(Reg1));
2654}
2655
2656std::tuple<Register, LLT, Register, LLT, Register, LLT>
2657MachineInstr::getFirst3RegLLTs() const {
2658 Register Reg0 = getOperand(0).getReg();
2659 Register Reg1 = getOperand(1).getReg();
2660 Register Reg2 = getOperand(2).getReg();
2661 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2662 getRegInfo()->getType(Reg1), Reg2,
2663 getRegInfo()->getType(Reg2));
2664}
2665
2666std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2667MachineInstr::getFirst4RegLLTs() const {
2668 Register Reg0 = getOperand(0).getReg();
2669 Register Reg1 = getOperand(1).getReg();
2670 Register Reg2 = getOperand(2).getReg();
2671 Register Reg3 = getOperand(3).getReg();
2672 return std::tuple(
2673 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2674 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2675}
2676
2677std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2678 LLT>
2679MachineInstr::getFirst5RegLLTs() const {
2680 Register Reg0 = getOperand(0).getReg();
2681 Register Reg1 = getOperand(1).getReg();
2682 Register Reg2 = getOperand(2).getReg();
2683 Register Reg3 = getOperand(3).getReg();
2684 Register Reg4 = getOperand(4).getReg();
2685 return std::tuple(
2686 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2687 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2688 Reg4, getRegInfo()->getType(Reg4));
2689}
2690
2691void MachineInstr::insert(mop_iterator InsertBefore,
2692 ArrayRef<MachineOperand> Ops) {
2693 assert(InsertBefore != nullptr && "invalid iterator");
2694 assert(InsertBefore->getParent() == this &&
2695 "iterator points to operand of other inst");
2696 if (Ops.empty())
2697 return;
2698
2699 // Do one pass to untie operands.
2700 SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2701 for (const MachineOperand &MO : operands()) {
2702 if (MO.isReg() && MO.isTied()) {
2703 unsigned OpNo = getOperandNo(&MO);
2704 unsigned TiedTo = findTiedOperandIdx(OpNo);
2705 TiedOpIndices[OpNo] = TiedTo;
2706 untieRegOperand(OpNo);
2707 }
2708 }
2709
2710 unsigned OpIdx = getOperandNo(InsertBefore);
2711 unsigned NumOperands = getNumOperands();
2712 unsigned OpsToMove = NumOperands - OpIdx;
2713
2714 SmallVector<MachineOperand> MovingOps;
2715 MovingOps.reserve(OpsToMove);
2716
2717 for (unsigned I = 0; I < OpsToMove; ++I) {
2718 MovingOps.emplace_back(getOperand(OpIdx));
2719 removeOperand(OpIdx);
2720 }
2721 for (const MachineOperand &MO : Ops)
2722 addOperand(MO);
2723 for (const MachineOperand &OpMoved : MovingOps)
2724 addOperand(OpMoved);
2725
2726 // Re-tie operands.
2727 for (auto [Tie1, Tie2] : TiedOpIndices) {
2728 if (Tie1 >= OpIdx)
2729 Tie1 += Ops.size();
2730 if (Tie2 >= OpIdx)
2731 Tie2 += Ops.size();
2732 tieOperands(Tie1, Tie2);
2733 }
2734}
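
A small sketch of insert() from a caller's perspective, adding operands in the middle of an instruction while the tie bookkeeping above is handled automatically; the helper and the chosen operands are illustrative:

// Sketch: splice two immediate operands in just before MI's final operand.
// insert() unties and reties any tied register operands across the insertion.
// MI must already belong to a MachineFunction so operands can be allocated.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"

static void insertTwoImms(llvm::MachineInstr &MI) {
  llvm::MachineOperand NewOps[] = {llvm::MachineOperand::CreateImm(0),
                                   llvm::MachineOperand::CreateImm(1)};
  // Assumes MI already has at least one operand to insert before.
  MI.insert(MI.operands_begin() + (MI.getNumOperands() - 1), NewOps);
}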
2735
2736bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2737 assert(OpId && "expected non-zero operand id");
2738 assert(isInlineAsm() && "should only be used on inline asm");
2739
2740 if (!getOperand(OpId).isReg())
2741 return false;
2742
2743 const MachineOperand &MD = getOperand(OpId - 1);
2744 if (!MD.isImm())
2745 return false;
2746
2747 InlineAsm::Flag F(MD.getImm());
2748 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2749 return F.getRegMayBeFolded();
2750 return false;
2751}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:638
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
uint64_t Size
#define Check(C,...)
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
A set of register units.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static cl::opt< bool > PrintMIAddrs("print-mi-addrs", cl::Hidden, cl::desc("Print addresses of MachineInstrs when dumping"))
static LocationSize getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, const SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, BatchAAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForRegHelper(Instruction *MI, Register Reg)
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetInstrInfo *&TII)
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
MachineInstr unsigned OpIdx
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:247
Value * RHS
Value * LHS
A private abstract base class describing the concept of an individual alias analysis implementation.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:191
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:124
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition: DebugLoc.h:244
Diagnostic information for inline asm reporting.
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:22
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:359
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has store to stack slots.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has load from stack slots.
This instruction compares its operands according to the predicate given to the constructor.
static StringRef getMemConstraintName(ConstraintCode C)
Definition: InlineAsm.h:470
constexpr bool isValid() const
Definition: LowLevelType.h:146
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:52
bool available(const MachineRegisterInfo &MRI, MCRegister Reg) const
Returns true if register Reg and no aliasing register is in the set.
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:31
bool hasValue() const
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isScalable() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:199
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:238
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:240
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:249
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:220
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:581
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:340
unsigned short Opcode
Definition: MCInstrDesc.h:206
bool isVariadic() const
Return true if this instruction can have a variable number of operands.
Definition: MCInstrDesc.h:262
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
Definition: MCInstrDesc.h:567
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isSubRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA.
bool isSuperRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a super-register of RegA.
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
Metadata node.
Definition: Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1451
bool isValid() const
isValid - Returns true until all the operands have been visited.
LLVM_ABI MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
LLVM_ABI void printAsOperand(raw_ostream &OS, bool PrintType=true) const
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
LLVM_ABI void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:72
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:587
LLVM_ABI void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isDebugValueList() const
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:974
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
mop_range debug_operands()
Returns all operands that are used to determine the variable location for this DBG_VALUE instruction.
Definition: MachineInstr.h:711
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
LLVM_ABI MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:359
MDNode * getMMRAMetadata() const
Helper to extract mmra.op metadata.
Definition: MachineInstr.h:863
LLVM_ABI void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
Definition: MachineInstr.h:872
bool isDebugLabel() const
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
Definition: MachineInstr.h:895
LLVM_ABI bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
Definition: MachineInstr.h:422
QueryType
API for querying MachineInstr properties.
Definition: MachineInstr.h:884
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
Definition: MachineInstr.h:754
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:948
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:409
LLVM_ABI uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
LLVM_ABI const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
LLVM_ABI std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
LLVM_ABI Register isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
LLVM_ABI bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
LLVM_ABI void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI bool mayAlias(BatchAAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
Definition: MachineInstr.h:593
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:590
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
LLVM_ABI void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which described the memory access done by this instr...
Definition: MachineInstr.h:810
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:805
bool isDebugRef() const
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI std::optional< LocationSize > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:773
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
Definition: MachineInstr.h:702
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
LLVM_ABI bool wouldBeTriviallyDead() const
Return true if this instruction would be trivially dead if all of its defined registers were dead.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
Definition: MachineInstr.h:490
LLVM_ABI std::tuple< LLT, LLT > getFirst2LLTs() const
LLVM_ABI std::optional< LocationSize > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
LLVM_ABI void unbundleFromPred()
Break bundle above this instruction.
LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool isStackAligningInlineAsm() const
LLVM_ABI void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
Definition: MachineInstr.h:853
bool isCFIInstruction() const
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:584
LLVM_ABI unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
LLVM_ABI void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
mop_range operands()
Definition: MachineInstr.h:693
LLVM_ABI bool isCandidateForAdditionalCallInfo(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an additional information associated with it.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
LLVM_ABI std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
Definition: MachineInstr.h:816
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
Definition: MachineInstr.h:431
LLVM_ABI std::optional< LocationSize > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
Definition: MachineInstr.h:655
LLVM_ABI InlineAsm::AsmDialect getInlineAsmDialect() const
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
LLVM_ABI bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
LLVM_ABI const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops BEFORE It. Can untie/retie tied operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
LLVM_ABI void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
LLVM_ABI const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLVM_ABI LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging supportDetermine the generic type to be printed (if needed) on uses and defs.
bool isLifetimeMarker() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
LLVM_ABI unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:798
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI void emitGenericError(const Twine &ErrMsg) const
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ABI const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:780
LLVM_ABI void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
LLVM_ABI bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
Definition: MachineInstr.h:416
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:511
LLVM_ABI bool isDead(const MachineRegisterInfo &MRI, LiveRegUnits *LivePhysRegs=nullptr) const
Check whether an MI is dead.
LLVM_ABI std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
LLVM_ABI const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
Definition: MachineInstr.h:819
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
LLVM_ABI void dump() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
Definition: MachineInstr.h:516
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:600
LLVM_ABI std::optional< LocationSize > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
Definition: MachineInstr.h:494
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
Definition: MachineInstr.h:843
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
bool isPHI() const
LLVM_ABI void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
LLVM_ABI void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:595
LLVM_ABI void emitInlineAsmError(const Twine &ErrMsg) const
Emit an error referring to the source location of this instruction.
uint32_t getFlags() const
Return the MI flags bitvector.
Definition: MachineInstr.h:404
bool isPseudoProbe() const
LLVM_ABI bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
Definition: MachineInstr.h:831
LLVM_ABI void unbundleFromSucc()
Break bundle below this instruction.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
LLVM_ABI bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
Definition: MachineInstr.h:637
LLVM_ABI void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
LLVM_ABI const MDNode * getLocCookieMD() const
For inline asm, get the !srcloc metadata node if we have it, and decode the loc cookie from it.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isFakeUse() const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
Definition: MachineInstr.h:916
LLVM_ABI int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
LLVM_ABI bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
LLVM_ABI void setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs)
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
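As a hedged illustration of the two def-scanning helpers above, a pass that has proven Reg has no further uses might locate the defining operand and mark it dead; MI, Reg, and TRI are assumed inputs.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

// Find the operand of MI that defines Reg (exact register, not overlaps)
// and set its dead bit; the index-based variant reports -1 when absent.
static void markDefDead(llvm::MachineInstr &MI, llvm::Register Reg,
                        const llvm::TargetRegisterInfo *TRI) {
  if (llvm::MachineOperand *DefMO = MI.findRegisterDefOperand(Reg, TRI))
    DefMO->setIsDead(true);
  int Idx = MI.findRegisterDefOperandIdx(Reg, TRI); // -1 if no such def
  (void)Idx;
}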
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
LLVM_ABI bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
LLVM_ABI bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
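A sketch, assuming MI already carries memory operands (e.g. a load created earlier in the pipeline), of reading the MachineMemOperand accessors listed above to form a conservative predicate.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// True only if every memory access of MI is unordered, has a known size,
// and is based on an IR Value rather than a pseudo source value.
static bool hasOnlySimpleIRAccesses(const llvm::MachineInstr &MI) {
  for (const llvm::MachineMemOperand *MMO : MI.memoperands()) {
    if (!MMO->isUnordered())        // volatile/atomic ordering present
      return false;
    if (!MMO->getValue())           // no IR base value (pseudo source value)
      return false;
    if (!MMO->getSize().hasValue()) // unknown access size
      return false;
  }
  return true;
}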
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
LLVM_ABI void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static LLVM_ABI void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isImplicit() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
LLVM_ABI void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static LLVM_ABI void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
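A hedged sketch of creating and rewriting operands with the MachineOperand helpers above; the operand index and immediate value are assumed to come from the caller's own analysis.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"

// Fold a register use that is known to always read the constant Imm.
static void foldKnownConstant(llvm::MachineInstr &MI, unsigned OpNo,
                              int64_t Imm) {
  llvm::MachineOperand &MO = MI.getOperand(OpNo);
  if (!MO.isReg() || MO.isDef())
    return;
  // A detached implicit-use operand can be built first if some other
  // instruction still needs to read the register:
  llvm::MachineOperand ImpUse = llvm::MachineOperand::CreateReg(
      MO.getReg(), /*isDef=*/false, /*isImp=*/true);
  (void)ImpUse;              // would typically be added to another MI
  MO.ChangeToImmediate(Imm); // the register use becomes an immediate
}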
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Representation for a specific memory location.
LLVM_ABI void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Definition: AsmWriter.cpp:5411
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
Definition: AsmWriter.cpp:963
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:78
An or instruction, which can be marked as "disjoint", indicating that the inputs don't have a 1 in th...
Definition: InstrTypes.h:404
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
Definition: Operator.h:154
Instruction that can have an nneg flag (zext/uitofp).
Definition: InstrTypes.h:641
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:78
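A short, hedged helper built on the two Register predicates above; the returned strings are illustrative only.

#include "llvm/CodeGen/Register.h"

// Classify a register number for diagnostics.
static const char *classifyReg(llvm::Register Reg) {
  if (Reg.isVirtual())
    return "virtual";   // %0, %1, ... still awaiting register allocation
  if (Reg.isPhysical())
    return "physical";  // a concrete target register
  return "no-register"; // the null Register()
}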
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:380
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void reserve(size_type N)
Definition: SmallVector.h:664
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:684
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
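A hedged sketch of the SmallVector/SmallPtrSet pattern implied by the entries above: collecting the distinct IR base values of an instruction's memory operands without heap allocation in the common small case.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Value.h"

// Gather each distinct base Value accessed by MI's memory operands.
static llvm::SmallVector<const llvm::Value *, 2>
collectMemBases(const llvm::MachineInstr &MI) {
  llvm::SmallVector<const llvm::Value *, 2> Bases;
  llvm::SmallPtrSet<const llvm::Value *, 4> Seen;
  for (const llvm::MachineMemOperand *MMO : MI.memoperands())
    if (const llvm::Value *V = MMO->getValue())
      if (Seen.insert(V).second) // true only on first occurrence
        Bases.push_back(V);
  return Bases;
}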
static LLVM_ABI unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
Definition: StackMaps.cpp:170
MI-level Statepoint operands.
Definition: StackMaps.h:159
LLVM_ABI int getFirstGCPtrIdx()
Get index of first GC pointer operand, or -1 if there are none.
Definition: StackMaps.cpp:124
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
virtual const TargetInstrInfo * getInstrInfo() const
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
LLVM Value Representation.
Definition: Value.h:75
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
Specialization of filter_iterator_base for forward iteration only.
Definition: STLExtras.h:506
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
Definition: MCInstrDesc.h:174
constexpr double e
Definition: MathExtras.h:47
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
hash_code hash_value(const FixedPointSemantics &Val)
Definition: APFixedPoint.h:137
LLVM_ABI formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
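A hedged sketch of the BuildMI builder interface; this uses the block/iterator overload rather than the MachineFunction overload shown above, with MBB, InsertPt, DL, and TII assumed to come from the enclosing pass and COPY chosen as a convenient target-independent opcode.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugLoc.h"

// Insert "DstReg = COPY SrcReg" before InsertPt, reusing DL so the new
// instruction keeps a sensible source location.
static void emitCopy(llvm::MachineBasicBlock &MBB,
                     llvm::MachineBasicBlock::iterator InsertPt,
                     const llvm::DebugLoc &DL,
                     const llvm::TargetInstrInfo *TII,
                     llvm::Register DstReg, llvm::Register SrcReg) {
  llvm::BuildMI(MBB, InsertPt, DL, TII->get(llvm::TargetOpcode::COPY), DstReg)
      .addReg(SrcReg, llvm::RegState::Kill); // caller guarantees last use
}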
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition: iterator.h:336
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:976
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and returns a new filter_iterator...
Definition: STLExtras.h:581
@ Other
Any other memory.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
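A hedged sketch of how a spiller-like pass might use the helper above (together with updateDbgValueForSpill, listed earlier) to keep DBG_VALUEs in sync after a spill; the list of tracking DBG_VALUEs and the frame index are assumed inputs.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

// After spilling Reg to FrameIndex, point each tracking DBG_VALUE at the
// stack slot instead of the no-longer-live register.
static void retargetDbgValues(
    llvm::SmallVectorImpl<llvm::MachineInstr *> &DbgValues, int FrameIndex,
    llvm::Register Reg) {
  for (llvm::MachineInstr *DbgMI : DbgValues)
    llvm::updateDbgValueForSpill(*DbgMI, FrameIndex, Reg);
}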
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition: iterator.h:363
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
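A hedged sketch combining a few of the range helpers listed in this section to query an instruction's operands; MI and Reg are assumed to come from the surrounding pass.

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"

// True if MI reads Reg in some operand but never defines it.
static bool readsButNeverWrites(const llvm::MachineInstr &MI,
                                llvm::Register Reg) {
  auto IsReg = [Reg](const llvm::MachineOperand &MO) {
    return MO.isReg() && MO.getReg() == Reg;
  };
  return llvm::any_of(MI.uses(), IsReg) && llvm::none_of(MI.defs(), IsReg);
}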
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:469
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static LLVM_ABI unsigned getHashValue(const MachineInstr *const &MI)