//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/Mutex.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

/// Used by the ReportedErrors class to guarantee that only one error is
/// reported at a time.
static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;

struct MachineVerifier {
  MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
                  raw_ostream *OS, bool AbortOnError = true)
      : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
                  bool AbortOnError = true)
      : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes, raw_ostream *OS,
                  bool AbortOnError = true)
      : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
        LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
        ReportedErrs(AbortOnError) {}

  /// \returns true if no problems were found.
  bool verify(const MachineFunction &MF);

  MachineFunctionAnalysisManager *MFAM = nullptr;
  Pass *const PASS = nullptr;
  raw_ostream &OS;
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }
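
  // For instance, for a physical register like X86's EAX this records EAX
  // plus its sub-registers (AX, AH, AL), so the liveness bookkeeping below
  // sees every aliasing register that a def or kill touches.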

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };
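
  // Schematically, the per-block sets above feed two dataflow passes declared
  // below: calcRegsPassed() propagates vregsPassed forward through the CFG
  // from each block's regsLiveOut, while calcRegsRequired() propagates
  // vregsRequired backward from blocks that use a vreg without defining it.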

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

  /// A class to track the number of reported errors and to guarantee that
  /// only one error is reported at a time.
  class ReportedErrors {
    unsigned NumReported = 0;
    bool AbortOnError;

  public:
    /// \param AbortOnError -- If set, abort after printing the first error.
    ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}

    ~ReportedErrors() {
      if (!hasError())
        return;
      if (AbortOnError)
        report_fatal_error("Found " + Twine(NumReported) +
                           " machine code errors.");
      // Since we haven't aborted, release the lock to allow other threads to
      // report errors.
      ReportedErrorsLock->unlock();
    }

    /// Increment the number of reported errors.
    /// \returns true if this is the first reported error.
    bool increment() {
      // If this is the first error this thread has encountered, grab the lock
      // to prevent other threads from reporting errors at the same time.
      // Otherwise we assume we already have the lock.
      if (!hasError())
        ReportedErrorsLock->lock();
      ++NumReported;
      return NumReported == 1;
    }

    /// \returns true if an error was reported.
    bool hasError() { return NumReported; }
  };
  ReportedErrors ReportedErrs;

  // This is calculated only when trying to verify convergence control tokens.
  // Similar to the LLVM IR verifier, we calculate this locally instead of
  // relying on the pass manager.
  MachineDominatorTree DT;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, VirtRegOrUnit VRegOrUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(VirtRegOrUnit VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          VirtRegOrUnit VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          VirtRegOrUnit VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval &);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, VirtRegOrUnit,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, VirtRegOrUnit,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, VirtRegOrUnit,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();
  /// Check that the stack protector is the top-most object in the stack.
  void verifyStackProtector();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierLegacyPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierLegacyPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacksWrapperLegacy>();
    AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
    AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
    AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasFailsVerification())
      return false;

    MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
    return false;
  }
};

} // end anonymous namespace

PreservedAnalyses
MachineVerifierPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  // Skip functions that have known verification problems.
  // FIXME: Remove this mechanism when all problematic passes have been
  // fixed.
  if (MF.getProperties().hasFailsVerification())
    return PreservedAnalyses::all();
  MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
  return PreservedAnalyses::all();
}

char MachineVerifierLegacyPass::ID = 0;

INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierLegacyPass(Banner);
}

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
}

bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
}

bool MachineFunction::verify(MachineFunctionAnalysisManager &MFAM,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(MFAM, Banner, OS, AbortOnError).verify(*this);
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
                         /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
      .verify(*this);
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
                                     E = Indexes->MBBIndexEnd();
       I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasNoVRegs() && MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

bool MachineVerifier::verify(const MachineFunction &MF) {
  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const MachineFunctionProperties &Props = MF.getProperties();
  const bool isFunctionFailedISel = Props.hasFailedISel();

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return true;

  isFunctionRegBankSelected = Props.hasRegBankSelected();
  isFunctionSelected = Props.hasSelected();
  isFunctionTracksDebugUserValues = Props.hasTracksDebugUserValues();

  if (PASS) {
    auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
    LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
    // We don't want to verify LiveVariables if LiveIntervals is available.
    auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
    if (!LiveInts)
      LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
    auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
    LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
    auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
    Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
  }
  if (MFAM) {
    MachineFunction &Func = const_cast<MachineFunction &>(MF);
    LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
    if (!LiveInts)
      LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
    // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
    Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        OS << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return !ReportedErrs.hasError();
}
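
// Schematically, verify() above fires the visitor hooks in this order:
//
//   visitMachineFunctionBefore()
//   for each MBB:
//     visitMachineBasicBlockBefore(MBB)
//     for each bundle (or stand-alone instruction):
//       visitMachineBundleBefore(Header)
//       for each MI in the bundle:
//         visitMachineInstrBefore(MI)
//         visitMachineOperand(Op, I)   // for each operand
//       visitMachineBundleAfter(Header)
//     visitMachineBasicBlockAfter(MBB)
//   visitMachineFunctionAfter()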

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  OS << '\n';
  if (ReportedErrs.increment()) {
    if (Banner)
      OS << "# " << Banner << '\n';

    if (LiveInts != nullptr)
      LiveInts->print(OS);
    else
      MF->print(OS, Indexes);
  }

  OS << "*** Bad machine code: " << msg << " ***\n"
     << "- function: " << MF->getName() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
     << " (" << (const void *)MBB << ')';
  if (Indexes)
    OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
       << Indexes->getMBBEndIdx(MBB) << ')';
  OS << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  OS << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    OS << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(OS, /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  OS << "- operand " << MONum << ": ";
  MO->print(OS, MOVRegType, TRI);
  OS << '\n';
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  OS << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  OS << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR,
                                     VirtRegOrUnit VRegOrUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegOrUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  OS << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  OS << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  OS << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  OS << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(
    VirtRegOrUnit VRegOrUnit) const {
  if (VRegOrUnit.isVirtualReg()) {
    report_context_vreg(VRegOrUnit.asVirtualReg());
  } else {
    OS << "- regunit: " << printRegUnit(VRegOrUnit.asMCRegUnit(), TRI)
       << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert_range(MBB.predecessors());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert_range(MBB.successors());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty()) {
    verifyStackFrame();
    verifyStackProtector();
  }
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasNoPHIs() && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock *, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the predecessor list of the successor "
         << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the successor list of the predecessor "
         << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!LI.PhysReg.isPhysical()) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      regsLive.insert_range(TRI->subregs_inclusive(LI.PhysReg));
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits())
    regsLive.insert_range(TRI->subregs_inclusive(I));

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      OS << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      OS << "First terminator was:\t" << *FirstTerminator;
    }
  }
}
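
// To illustrate the bundle flags checked in verify() above, a well-formed
// three-instruction bundle carries them like this:
//
//   BUNDLE ...   ; header: BundledSucc only
//     MI1        ; BundledPred + BundledSucc
//     MI2        ; BundledPred only (ends the bundle)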

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}
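
// The operand layout enforced above is, schematically:
//
//   INLINEASM <asm string>, <extra flags imm>,
//             <group0 flag imm>, <group0 operands...>,
//             <group1 flag imm>, <group1 operands...>, ...
//             [optional !mdnode] [implicit register operands...]
//
// where each group's flag immediate encodes (among other things) how many
// register operands belong to that group.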

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}
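
// For example, s32 vs. s64 and <4 x s32> vs. <4 x s16> pass this check, since
// only scalar/vector-ness and element counts are compared here; s32 vs.
// <4 x s32> and <2 x s64> vs. <4 x s32> are reported as errors.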

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}
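
// Taken together, the two helpers above pair each generic intrinsic opcode
// with the declared attributes of the intrinsic it calls:
//
//   G_INTRINSIC, G_INTRINSIC_CONVERGENT               -> must not access memory
//   G_INTRINSIC_W_SIDE_EFFECTS (and its _CONVERGENT
//   variant)                                          -> must access memory
//   G_INTRINSIC, G_INTRINSIC_W_SIDE_EFFECTS           -> must not be convergent
//   G_INTRINSIC_CONVERGENT (and its _W_SIDE_EFFECTS
//   variant)                                          -> must be convergent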

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
                                ValTy.getSizeInBits()))
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
                                ValTy.getSizeInBytes()))
          report("load memory size cannot exceed result size", MI);

        if (MMO.getRanges()) {
          ConstantInt *i =
              mdconst::extract<ConstantInt>(MMO.getRanges()->getOperand(0));
          const LLT RangeTy = LLT::scalar(i->getIntegerType()->getBitWidth());
          const LLT MemTy = MMO.getMemoryType();
          if (MemTy.getScalarType() != RangeTy ||
              ValTy.isScalar() != MemTy.isScalar() ||
              (ValTy.isVector() &&
               ValTy.getNumElements() != MemTy.getNumElements())) {
            report("range is incompatible with the result type", MI);
          }
        }
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
                                MMO.getSize().getValue()))
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    if (PtrTy.isPointerOrPointerVector()) {
      const DataLayout &DL = MF->getDataLayout();
      unsigned AS = PtrTy.getAddressSpace();
      unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
      if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
        report("gep offset operand must match index size for address space",
               MI);
      }
    }

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
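
  // For example, on a target whose address space 0 uses 64-bit pointer
  // indices, a well-formed G_PTR_ADD looks like (illustrative MIR):
  //   %dst:_(p0) = G_PTR_ADD %base:_(p0), %off:_(s64)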
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
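
  // For example (again assuming a 64-bit pointer index type), the G_PTRMASK
  // checks above accept:
  //   %dst:_(p0) = G_PTRMASK %src:_(p0), %mask:_(s64)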
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_TRUNC_SSAT_S:
  case TargetOpcode::G_TRUNC_SSAT_U:
  case TargetOpcode::G_TRUNC_USAT_U:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = SrcTy.getScalarSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_TRUNC_SSAT_S:
    case TargetOpcode::G_TRUNC_SSAT_U:
    case TargetOpcode::G_TRUNC_USAT_U:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
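
  // For example, the G_MERGE_VALUES checks above accept
  //   %0:_(s64) = G_MERGE_VALUES %1:_(s32), %2:_(s32)
  // but reject a vector result such as
  //   %0:_(<2 x s32>) = G_MERGE_VALUES %1:_(s32), %2:_(s32)
  // which should be a G_BUILD_VECTOR instead.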
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() ||
          (SrcTy.getScalarType() != DstTy.getScalarType() &&
           !SrcTy.isPointerVector()) ||
          SrcTy.isScalableVector() != DstTy.isScalableVector() ||
          SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTOR requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    if (DstTy.getElementCount() !=
        SrcTy.getElementCount() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() &&
         DstTy.getElementCount() != SrcTy.getElementCount()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  case TargetOpcode::G_SCMP:
  case TargetOpcode::G_UCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());

    if (SrcTy.isPointerOrPointerVector()) {
      report("Generic scmp/ucmp does not support pointers as operands", MI);
      break;
    }

    if (DstTy.isPointerOrPointerVector()) {
      report("Generic scmp/ucmp does not support pointers as a result", MI);
      break;
    }

    if (DstTy.getScalarSizeInBits() < 2) {
      report("Result type must be at least 2 bits wide", MI);
      break;
    }

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() &&
         DstTy.getElementCount() != SrcTy.getElementCount())) {
      report("Generic vector scmp/ucmp must preserve number of lanes", MI);
      break;
    }

    break;
  }
  case TargetOpcode::G_EXTRACT: {
    const MachineOperand &SrcOp = MI->getOperand(1);
    if (!SrcOp.isReg()) {
      report("extract source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(2);
    if (!OffsetOp.isImm()) {
      report("extract offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
    if (SrcSize == DstSize)
      report("extract source must be larger than result", MI);

    if (DstSize + OffsetOp.getImm() > SrcSize)
      report("extract reads past end of register", MI);
    break;
  }
1661 case TargetOpcode::G_INSERT: {
1662 const MachineOperand &SrcOp = MI->getOperand(2);
1663 if (!SrcOp.isReg()) {
1664 report("insert source must be a register", MI);
1665 break;
1666 }
1667
1668 const MachineOperand &OffsetOp = MI->getOperand(3);
1669 if (!OffsetOp.isImm()) {
1670 report("insert offset must be a constant", MI);
1671 break;
1672 }
1673
1674 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1675 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1676
1677 if (DstSize <= SrcSize)
1678 report("inserted size must be smaller than total register", MI);
1679
1680 if (SrcSize + OffsetOp.getImm() > DstSize)
1681 report("insert writes past end of register", MI);
1682
1683 break;
1684 }
1685 case TargetOpcode::G_JUMP_TABLE: {
1686 if (!MI->getOperand(1).isJTI())
1687 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1688 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1689 if (!DstTy.isPointer())
1690 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1691 break;
1692 }
1693 case TargetOpcode::G_BRJT: {
1694 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1695 report("G_BRJT src operand 0 must be a pointer type", MI);
1696
1697 if (!MI->getOperand(1).isJTI())
1698 report("G_BRJT src operand 1 must be a jump table index", MI);
1699
1700 const auto &IdxOp = MI->getOperand(2);
1701 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1702 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1703 break;
1704 }
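// Illustrative well-formed MIR for the jump-table opcodes (not from this
// file; names arbitrary):
//   %jt:_(p0) = G_JUMP_TABLE %jump-table.0
//   G_BRJT %jt(p0), %jump-table.0, %idx(s64)
// The table address is a pointer, operand 1 is a jump table index, and the
// index is a scalar register, as required above.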
1705 case TargetOpcode::G_INTRINSIC:
1706 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1707 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1708 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1709 // TODO: Should verify number of def and use operands, but the current
1710 // interface requires passing in IR types for mangling.
1711 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1712 if (!IntrIDOp.isIntrinsicID()) {
1713 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1714 break;
1715 }
1716
1717 if (!verifyGIntrinsicSideEffects(MI))
1718 break;
1719 if (!verifyGIntrinsicConvergence(MI))
1720 break;
1721
1722 break;
1723 }
1724 case TargetOpcode::G_SEXT_INREG: {
1725 if (!MI->getOperand(2).isImm()) {
1726 report("G_SEXT_INREG expects an immediate operand #2", MI);
1727 break;
1728 }
1729
1730 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1731 int64_t Imm = MI->getOperand(2).getImm();
1732 if (Imm <= 0)
1733 report("G_SEXT_INREG size must be >= 1", MI);
1734 if (Imm >= SrcTy.getScalarSizeInBits())
1735 report("G_SEXT_INREG size must be less than source bit width", MI);
1736 break;
1737 }
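// Illustrative well-formed MIR (not from this file; names arbitrary):
//   %r:_(s32) = G_SEXT_INREG %x, 8
// The immediate 8 satisfies 1 <= 8 < 32, the source scalar width.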
1738 case TargetOpcode::G_BSWAP: {
1739 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1740 if (DstTy.getScalarSizeInBits() % 16 != 0)
1741 report("G_BSWAP size must be a multiple of 16 bits", MI);
1742 break;
1743 }
1744 case TargetOpcode::G_VSCALE: {
1745 if (!MI->getOperand(1).isCImm()) {
1746 report("G_VSCALE operand must be cimm", MI);
1747 break;
1748 }
1749 if (MI->getOperand(1).getCImm()->isZero()) {
1750 report("G_VSCALE immediate cannot be zero", MI);
1751 break;
1752 }
1753 break;
1754 }
1755 case TargetOpcode::G_STEP_VECTOR: {
1756 if (!MI->getOperand(1).isCImm()) {
1757 report("operand must be cimm", MI);
1758 break;
1759 }
1760
1761 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1762 report("step must be > 0", MI);
1763 break;
1764 }
1765
1766 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1767 if (!DstTy.isScalableVector()) {
1768 report("Destination type must be a scalable vector", MI);
1769 break;
1770 }
1771
1772 // Reject pointer element types, e.g. <vscale x 2 x p0>.
1773 if (!DstTy.getElementType().isScalar()) {
1774 report("Destination element type must be scalar", MI);
1775 break;
1776 }
1777
1778 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1779 DstTy.getElementType().getSizeInBits()) {
1780 report("step bitwidth differs from result type element bitwidth", MI);
1781 break;
1782 }
1783 break;
1784 }
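// Illustrative well-formed MIR (not from this file; names arbitrary):
//   %v:_(<vscale x 4 x s32>) = G_STEP_VECTOR i32 1
// The step is a strictly positive ConstantInt whose 32-bit width matches the
// element type of the scalable destination vector.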
1785 case TargetOpcode::G_INSERT_SUBVECTOR: {
1786 const MachineOperand &Src0Op = MI->getOperand(1);
1787 if (!Src0Op.isReg()) {
1788 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1789 break;
1790 }
1791
1792 const MachineOperand &Src1Op = MI->getOperand(2);
1793 if (!Src1Op.isReg()) {
1794 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1795 break;
1796 }
1797
1798 const MachineOperand &IndexOp = MI->getOperand(3);
1799 if (!IndexOp.isImm()) {
1800 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1801 break;
1802 }
1803
1804 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1805 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1806
1807 if (!DstTy.isVector()) {
1808 report("Destination type must be a vector", MI);
1809 break;
1810 }
1811
1812 if (!Src1Ty.isVector()) {
1813 report("Second source must be a vector", MI);
1814 break;
1815 }
1816
1817 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1818 report("Element type of vectors must be the same", MI);
1819 break;
1820 }
1821
1822 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1823 report("Vector types must both be fixed or both be scalable", MI);
1824 break;
1825 }
1826
1827 if (ElementCount::isKnownGT(Src1Ty.getElementCount(),
1828 DstTy.getElementCount())) {
1829 report("Second source must be smaller than destination vector", MI);
1830 break;
1831 }
1832
1833 uint64_t Idx = IndexOp.getImm();
1834 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1835 if (IndexOp.getImm() % Src1MinLen != 0) {
1836 report("Index must be a multiple of the second source vector's "
1837 "minimum vector length",
1838 MI);
1839 break;
1840 }
1841
1842 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1843 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1844 report("Subvector type and index must not cause insert to overrun the "
1845 "vector being inserted into",
1846 MI);
1847 break;
1848 }
1849
1850 break;
1851 }
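// Illustrative well-formed MIR (not from this file; names arbitrary):
//   %r:_(<vscale x 4 x s32>) = G_INSERT_SUBVECTOR %big(<vscale x 4 x s32>),
//                                                 %sub(<vscale x 2 x s32>), 2
// The index 2 is a multiple of the subvector's minimum length (2), and
// 2 + 2 does not overrun the destination's minimum length (4).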
1852 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1853 const MachineOperand &SrcOp = MI->getOperand(1);
1854 if (!SrcOp.isReg()) {
1855 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1856 break;
1857 }
1858
1859 const MachineOperand &IndexOp = MI->getOperand(2);
1860 if (!IndexOp.isImm()) {
1861 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1862 break;
1863 }
1864
1865 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1866 LLT SrcTy = MRI->getType(SrcOp.getReg());
1867
1868 if (!DstTy.isVector()) {
1869 report("Destination type must be a vector", MI);
1870 break;
1871 }
1872
1873 if (!SrcTy.isVector()) {
1874 report("Source must be a vector", MI);
1875 break;
1876 }
1877
1878 if (DstTy.getElementType() != SrcTy.getElementType()) {
1879 report("Element type of vectors must be the same", MI);
1880 break;
1881 }
1882
1883 if (SrcTy.isScalable() != DstTy.isScalable()) {
1884 report("Vector types must both be fixed or both be scalable", MI);
1885 break;
1886 }
1887
1888 if (ElementCount::isKnownGT(DstTy.getElementCount(),
1889 SrcTy.getElementCount())) {
1890 report("Destination vector must be smaller than source vector", MI);
1891 break;
1892 }
1893
1894 uint64_t Idx = IndexOp.getImm();
1895 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1896 if (Idx % DstMinLen != 0) {
1897 report("Index must be a multiple of the destination vector's minimum "
1898 "vector length",
1899 MI);
1900 break;
1901 }
1902
1903 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1904 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1905 report("Destination type and index must not cause extract to overrun the "
1906 "source vector",
1907 MI);
1908 break;
1909 }
1910
1911 break;
1912 }
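// Illustrative well-formed MIR (not from this file; names arbitrary):
//   %r:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR %v(<vscale x 4 x s32>), 2
// The index 2 is a multiple of the result's minimum length (2), and
// 2 + 2 does not overrun the source's minimum length (4).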
1913 case TargetOpcode::G_SHUFFLE_VECTOR: {
1914 const MachineOperand &MaskOp = MI->getOperand(3);
1915 if (!MaskOp.isShuffleMask()) {
1916 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1917 break;
1918 }
1919
1920 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1921 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1922 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1923
1924 if (Src0Ty != Src1Ty)
1925 report("Source operands must be the same type", MI);
1926
1927 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1928 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1929
1930 // Don't check that all operands are vector because scalars are used in
1931 // place of 1 element vectors.
1932 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1933 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1934
1935 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1936
1937 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1938 report("Wrong result type for shufflemask", MI);
1939
1940 for (int Idx : MaskIdxes) {
1941 if (Idx < 0)
1942 continue;
1943
1944 if (Idx >= 2 * SrcNumElts)
1945 report("Out of bounds shuffle index", MI);
1946 }
1947
1948 break;
1949 }
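// Illustrative well-formed MIR (not from this file; names arbitrary):
//   %r:_(<4 x s32>) = G_SHUFFLE_VECTOR %a(<4 x s32>), %b(<4 x s32>),
//                                      shufflemask(0, 4, 1, 5)
// The mask has one index per destination element; indices 0..3 select from
// %a, 4..7 select from %b, and -1 would mark an undef lane.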
1950
1951 case TargetOpcode::G_SPLAT_VECTOR: {
1952 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1953 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1954
1955 if (!DstTy.isScalableVector()) {
1956 report("Destination type must be a scalable vector", MI);
1957 break;
1958 }
1959
1960 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1961 report("Source type must be a scalar or pointer", MI);
1962 break;
1963 }
1964
1965 if (TypeSize::isKnownGT(DstTy.getElementType().getSizeInBits(),
1966 SrcTy.getSizeInBits())) {
1967 report("Element type of the destination must be the same size or smaller "
1968 "than the source type",
1969 MI);
1970 break;
1971 }
1972
1973 break;
1974 }
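// Illustrative well-formed MIR (not from this file; names arbitrary):
//   %v:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %x(s32)
// The scalar source is exactly as wide as the destination element type, so
// the size check above passes.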
1975 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1976 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1977 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1978 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1979
1980 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1981 report("Destination type must be a scalar or pointer", MI);
1982 break;
1983 }
1984
1985 if (!SrcTy.isVector()) {
1986 report("First source must be a vector", MI);
1987 break;
1988 }
1989
1990 auto TLI = MF->getSubtarget().getTargetLowering();
1991 if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
1992 report("Index type must match VectorIdxTy", MI);
1993 break;
1994 }
1995
1996 break;
1997 }
1998 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1999 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2000 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
2001 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
2002 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
2003
2004 if (!DstTy.isVector()) {
2005 report("Destination type must be a vector", MI);
2006 break;
2007 }
2008
2009 if (VecTy != DstTy) {
2010 report("Destination type and vector type must match", MI);
2011 break;
2012 }
2013
2014 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
2015 report("Inserted element must be a scalar or pointer", MI);
2016 break;
2017 }
2018
2019 auto TLI = MF->getSubtarget().getTargetLowering();
2020 if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
2021 report("Index type must match VectorIdxTy", MI);
2022 break;
2023 }
2024
2025 break;
2026 }
2027 case TargetOpcode::G_DYN_STACKALLOC: {
2028 const MachineOperand &DstOp = MI->getOperand(0);
2029 const MachineOperand &AllocOp = MI->getOperand(1);
2030 const MachineOperand &AlignOp = MI->getOperand(2);
2031
2032 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2033 report("dst operand 0 must be a pointer type", MI);
2034 break;
2035 }
2036
2037 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2038 report("src operand 1 must be a scalar reg type", MI);
2039 break;
2040 }
2041
2042 if (!AlignOp.isImm()) {
2043 report("src operand 2 must be an immediate type", MI);
2044 break;
2045 }
2046 break;
2047 }
2048 case TargetOpcode::G_MEMCPY_INLINE:
2049 case TargetOpcode::G_MEMCPY:
2050 case TargetOpcode::G_MEMMOVE: {
2051 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2052 if (MMOs.size() != 2) {
2053 report("memcpy/memmove must have 2 memory operands", MI);
2054 break;
2055 }
2056
2057 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2058 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2059 report("wrong memory operand types", MI);
2060 break;
2061 }
2062
2063 if (MMOs[0]->getSize() != MMOs[1]->getSize())
2064 report("inconsistent memory operand sizes", MI);
2065
2066 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2067 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2068
2069 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2070 report("memory instruction operand must be a pointer", MI);
2071 break;
2072 }
2073
2074 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2075 report("inconsistent store address space", MI);
2076 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2077 report("inconsistent load address space", MI);
2078
2079 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2080 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2081 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2082
2083 break;
2084 }
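// Illustrative well-formed MIR (not from this file; names arbitrary): the
// first memory operand describes the store, the second the load, and the
// trailing immediate is the 'tail' flag:
//   G_MEMCPY %dst(p0), %src(p0), %len(s64), 0 :: (store (s8) into %ir.d),
//       (load (s8) from %ir.s)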
2085 case TargetOpcode::G_BZERO:
2086 case TargetOpcode::G_MEMSET: {
2087 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2088 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2089 if (MMOs.size() != 1) {
2090 report(Twine(Name, " must have 1 memory operand"), MI);
2091 break;
2092 }
2093
2094 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2095 report(Twine(Name, " memory operand must be a store"), MI);
2096 break;
2097 }
2098
2099 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2100 if (!DstPtrTy.isPointer()) {
2101 report(Twine(Name, " operand must be a pointer"), MI);
2102 break;
2103 }
2104
2105 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2106 report("inconsistent " + Twine(Name, " address space"), MI);
2107
2108 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2109 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2110 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2111
2112 break;
2113 }
2114 case TargetOpcode::G_UBSANTRAP: {
2115 const MachineOperand &KindOp = MI->getOperand(0);
2116 if (!MI->getOperand(0).isImm()) {
2117 report("Crash kind must be an immediate", &KindOp, 0);
2118 break;
2119 }
2120 int64_t Kind = MI->getOperand(0).getImm();
2121 if (!isInt<8>(Kind))
2122 report("Crash kind must be 8 bit wide", &KindOp, 0);
2123 break;
2124 }
2125 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2126 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2127 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2128 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2129 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2130 if (!DstTy.isScalar())
2131 report("Vector reduction requires a scalar destination type", MI);
2132 if (!Src1Ty.isScalar())
2133 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2134 if (!Src2Ty.isVector())
2135 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2136 break;
2137 }
2138 case TargetOpcode::G_VECREDUCE_FADD:
2139 case TargetOpcode::G_VECREDUCE_FMUL:
2140 case TargetOpcode::G_VECREDUCE_FMAX:
2141 case TargetOpcode::G_VECREDUCE_FMIN:
2142 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2143 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2144 case TargetOpcode::G_VECREDUCE_ADD:
2145 case TargetOpcode::G_VECREDUCE_MUL:
2146 case TargetOpcode::G_VECREDUCE_AND:
2147 case TargetOpcode::G_VECREDUCE_OR:
2148 case TargetOpcode::G_VECREDUCE_XOR:
2149 case TargetOpcode::G_VECREDUCE_SMAX:
2150 case TargetOpcode::G_VECREDUCE_SMIN:
2151 case TargetOpcode::G_VECREDUCE_UMAX:
2152 case TargetOpcode::G_VECREDUCE_UMIN: {
2153 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2154 if (!DstTy.isScalar())
2155 report("Vector reduction requires a scalar destination type", MI);
2156 break;
2157 }
2158
2159 case TargetOpcode::G_SBFX:
2160 case TargetOpcode::G_UBFX: {
2161 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2162 if (DstTy.isVector()) {
2163 report("Bitfield extraction is not supported on vectors", MI);
2164 break;
2165 }
2166 break;
2167 }
2168 case TargetOpcode::G_SHL:
2169 case TargetOpcode::G_LSHR:
2170 case TargetOpcode::G_ASHR:
2171 case TargetOpcode::G_ROTR:
2172 case TargetOpcode::G_ROTL: {
2173 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2174 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2175 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2176 report("Shifts and rotates require operands to be either all scalars or "
2177 "all vectors",
2178 MI);
2179 break;
2180 }
2181 break;
2182 }
2183 case TargetOpcode::G_LLROUND:
2184 case TargetOpcode::G_LROUND: {
2185 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2186 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2187 if (!DstTy.isValid() || !SrcTy.isValid())
2188 break;
2189 if (SrcTy.isPointer() || DstTy.isPointer()) {
2190 StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2191 report(Twine(Op, " operand must not be a pointer type"), MI);
2192 } else if (SrcTy.isScalar()) {
2193 verifyAllRegOpsScalar(*MI, *MRI);
2194 break;
2195 } else if (SrcTy.isVector()) {
2196 verifyVectorElementMatch(SrcTy, DstTy, MI);
2197 break;
2198 }
2199 break;
2200 }
2201 case TargetOpcode::G_IS_FPCLASS: {
2202 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2203 LLT DestEltTy = DestTy.getScalarType();
2204 if (!DestEltTy.isScalar()) {
2205 report("Destination must be a scalar or vector of scalars", MI);
2206 break;
2207 }
2208 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2209 LLT SrcEltTy = SrcTy.getScalarType();
2210 if (!SrcEltTy.isScalar()) {
2211 report("Source must be a scalar or vector of scalars", MI);
2212 break;
2213 }
2214 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2215 break;
2216 const MachineOperand &TestMO = MI->getOperand(2);
2217 if (!TestMO.isImm()) {
2218 report("floating-point class set (operand 2) must be an immediate", MI);
2219 break;
2220 }
2221 int64_t Test = TestMO.getImm();
2222 if (Test < 0 || Test > fcAllFlags) {
2223 report("Incorrect floating-point class set (operand 2)", MI);
2224 break;
2225 }
2226 break;
2227 }
2228 case TargetOpcode::G_PREFETCH: {
2229 const MachineOperand &AddrOp = MI->getOperand(0);
2230 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2231 report("addr operand must be a pointer", &AddrOp, 0);
2232 break;
2233 }
2234 const MachineOperand &RWOp = MI->getOperand(1);
2235 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2236 report("rw operand must be an immediate 0-1", &RWOp, 1);
2237 break;
2238 }
2239 const MachineOperand &LocalityOp = MI->getOperand(2);
2240 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2241 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2242 break;
2243 }
2244 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2245 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2246 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2247 break;
2248 }
2249 break;
2250 }
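// Illustrative well-formed MIR (not from this file; names arbitrary):
// rw = 1 (write), locality = 3, cache type = 1 (data), all within the
// ranges checked above:
//   G_PREFETCH %addr(p0), 1, 3, 1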
2251 case TargetOpcode::G_ASSERT_ALIGN: {
2252 if (MI->getOperand(2).getImm() < 1)
2253 report("alignment immediate must be >= 1", MI);
2254 break;
2255 }
2256 case TargetOpcode::G_CONSTANT_POOL: {
2257 if (!MI->getOperand(1).isCPI())
2258 report("Src operand 1 must be a constant pool index", MI);
2259 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2260 report("Dst operand 0 must be a pointer", MI);
2261 break;
2262 }
2263 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2264 const MachineOperand &AddrOp = MI->getOperand(1);
2265 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2266 report("addr operand must be a pointer", &AddrOp, 1);
2267 break;
2268 }
2269 default:
2270 break;
2271 }
2272}
2273
2274void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2275 const MCInstrDesc &MCID = MI->getDesc();
2276 if (MI->getNumOperands() < MCID.getNumOperands()) {
2277 report("Too few operands", MI);
2278 OS << MCID.getNumOperands() << " operands expected, but "
2279 << MI->getNumOperands() << " given.\n";
2280 }
2281
2282 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2283 report("NoConvergent flag expected only on convergent instructions.", MI);
2284
2285 if (MI->isPHI()) {
2286 if (MF->getProperties().hasNoPHIs())
2287 report("Found PHI instruction with NoPHIs property set", MI);
2288
2289 if (FirstNonPHI)
2290 report("Found PHI instruction after non-PHI", MI);
2291 } else if (FirstNonPHI == nullptr)
2292 FirstNonPHI = MI;
2293
2294 // Check the tied operands.
2295 if (MI->isInlineAsm())
2296 verifyInlineAsm(MI);
2297
2298 // Check that unspillable terminators define a reg and have at most one use.
2299 if (TII->isUnspillableTerminator(MI)) {
2300 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2301 report("Unspillable Terminator does not define a reg", MI);
2302 Register Def = MI->getOperand(0).getReg();
2303 if (Def.isVirtual() && !MF->getProperties().hasNoPHIs() &&
2304 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2305 report("Unspillable Terminator expected to have at most one use!", MI);
2306 }
2307
2308 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2309 // DBG_VALUEs: these are convenient to use in tests, but should never get
2310 // generated.
2311 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2312 if (!MI->getDebugLoc())
2313 report("Missing DebugLoc for debug instruction", MI);
2314
2315 // Meta instructions should never be the subject of debug value tracking;
2316 // they don't create a value in the output program at all.
2317 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2318 report("Metadata instruction should not have a value tracking number", MI);
2319
2320 // Check the MachineMemOperands for basic consistency.
2321 for (MachineMemOperand *Op : MI->memoperands()) {
2322 if (Op->isLoad() && !MI->mayLoad())
2323 report("Missing mayLoad flag", MI);
2324 if (Op->isStore() && !MI->mayStore())
2325 report("Missing mayStore flag", MI);
2326 }
2327
2328 // Debug values must not have a slot index.
2329 // Other instructions must have one, unless they are inside a bundle.
2330 if (LiveInts) {
2331 bool mapped = !LiveInts->isNotInMIMap(*MI);
2332 if (MI->isDebugOrPseudoInstr()) {
2333 if (mapped)
2334 report("Debug instruction has a slot index", MI);
2335 } else if (MI->isInsideBundle()) {
2336 if (mapped)
2337 report("Instruction inside bundle has a slot index", MI);
2338 } else {
2339 if (!mapped)
2340 report("Missing slot index", MI);
2341 }
2342 }
2343
2344 unsigned Opc = MCID.getOpcode();
2345 if (isPreISelGenericOpcode(Opc)) {
2346 verifyPreISelGenericInstruction(MI);
2347 return;
2348 }
2349
2350 StringRef ErrorInfo;
2351 if (!TII->verifyInstruction(*MI, ErrorInfo))
2352 report(ErrorInfo.data(), MI);
2353
2354 // Verify properties of various specific instruction types
2355 switch (MI->getOpcode()) {
2356 case TargetOpcode::COPY: {
2357 const MachineOperand &DstOp = MI->getOperand(0);
2358 const MachineOperand &SrcOp = MI->getOperand(1);
2359 const Register SrcReg = SrcOp.getReg();
2360 const Register DstReg = DstOp.getReg();
2361
2362 LLT DstTy = MRI->getType(DstReg);
2363 LLT SrcTy = MRI->getType(SrcReg);
2364 if (SrcTy.isValid() && DstTy.isValid()) {
2365 // If both types are valid, check that the types are the same.
2366 if (SrcTy != DstTy) {
2367 report("Copy Instruction is illegal with mismatching types", MI);
2368 OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2369 }
2370
2371 break;
2372 }
2373
2374 if (!SrcTy.isValid() && !DstTy.isValid())
2375 break;
2376
2377 // If we have only one valid type, this is likely a copy between a virtual
2378 // and physical register.
2379 TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2380 TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2381 if (SrcReg.isPhysical() && DstTy.isValid()) {
2382 const TargetRegisterClass *SrcRC =
2383 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2384 if (SrcRC)
2385 SrcSize = TRI->getRegSizeInBits(*SrcRC);
2386 }
2387
2388 if (DstReg.isPhysical() && SrcTy.isValid()) {
2389 const TargetRegisterClass *DstRC =
2390 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2391 if (DstRC)
2392 DstSize = TRI->getRegSizeInBits(*DstRC);
2393 }
2394
2395 // The next two checks allow COPY between physical and virtual registers,
2396 // when the virtual register has a scalable size and the physical register
2397 // has a fixed size. These checks allow COPY between *potentially* mismatched
2398 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2399 // be able to resolve a fixed size for the scalable vector, and at that
2400 // point this function will know for sure whether the sizes are mismatched
2401 // and correctly report a size mismatch.
2402 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2403 !SrcSize.isScalable())
2404 break;
2405 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2406 !DstSize.isScalable())
2407 break;
2408
2409 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2410 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2411 report("Copy Instruction is illegal with mismatching sizes", MI);
2412 OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2413 }
2414 }
2415 break;
2416 }
2417 case TargetOpcode::STATEPOINT: {
2418 StatepointOpers SO(MI);
2419 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2420 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2421 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2422 report("meta operands to STATEPOINT not constant!", MI);
2423 break;
2424 }
2425
2426 auto VerifyStackMapConstant = [&](unsigned Offset) {
2427 if (Offset >= MI->getNumOperands()) {
2428 report("stack map constant to STATEPOINT is out of range!", MI);
2429 return;
2430 }
2431 if (!MI->getOperand(Offset - 1).isImm() ||
2432 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2433 !MI->getOperand(Offset).isImm())
2434 report("stack map constant to STATEPOINT not well formed!", MI);
2435 };
2436 VerifyStackMapConstant(SO.getCCIdx());
2437 VerifyStackMapConstant(SO.getFlagsIdx());
2438 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2439 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2440 VerifyStackMapConstant(SO.getNumAllocaIdx());
2441 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2442
2443 // Verify that all explicit statepoint defs are tied to gc operands as
2444 // they are expected to be a relocation of gc operands.
2445 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2446 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2447 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2448 unsigned UseOpIdx;
2449 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2450 report("STATEPOINT defs expected to be tied", MI);
2451 break;
2452 }
2453 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2454 report("STATEPOINT def tied to non-gc operand", MI);
2455 break;
2456 }
2457 }
2458
2459 // TODO: verify we have properly encoded deopt arguments
2460 } break;
2461 case TargetOpcode::INSERT_SUBREG: {
2462 unsigned InsertedSize;
2463 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2464 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2465 else
2466 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2467 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2468 if (SubRegSize < InsertedSize) {
2469 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2470 "size than the subreg it was inserted into", MI);
2471 break;
2472 }
2473 } break;
2474 case TargetOpcode::REG_SEQUENCE: {
2475 unsigned NumOps = MI->getNumOperands();
2476 if (!(NumOps & 1)) {
2477 report("Invalid number of operands for REG_SEQUENCE", MI);
2478 break;
2479 }
2480
2481 for (unsigned I = 1; I != NumOps; I += 2) {
2482 const MachineOperand &RegOp = MI->getOperand(I);
2483 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2484
2485 if (!RegOp.isReg())
2486 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2487
2488 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2489 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2490 report("Invalid subregister index operand for REG_SEQUENCE",
2491 &SubRegOp, I + 1);
2492 }
2493 }
2494
2495 Register DstReg = MI->getOperand(0).getReg();
2496 if (DstReg.isPhysical())
2497 report("REG_SEQUENCE does not support physical register results", MI);
2498
2499 if (MI->getOperand(0).getSubReg())
2500 report("Invalid subreg result for REG_SEQUENCE", MI);
2501
2502 break;
2503 }
2504 }
2505}
2506
2507void
2508MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2509 const MachineInstr *MI = MO->getParent();
2510 const MCInstrDesc &MCID = MI->getDesc();
2511 unsigned NumDefs = MCID.getNumDefs();
2512 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2513 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2514
2515 // The first MCID.NumDefs operands must be explicit register defines
2516 if (MONum < NumDefs) {
2517 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2518 if (!MO->isReg())
2519 report("Explicit definition must be a register", MO, MONum);
2520 else if (!MO->isDef() && !MCOI.isOptionalDef())
2521 report("Explicit definition marked as use", MO, MONum);
2522 else if (MO->isImplicit())
2523 report("Explicit definition marked as implicit", MO, MONum);
2524 } else if (MONum < MCID.getNumOperands()) {
2525 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2526 // Don't check if it's the last operand in a variadic instruction. See,
2527 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2528 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2529 if (!IsOptional) {
2530 if (MO->isReg()) {
2531 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2532 report("Explicit operand marked as def", MO, MONum);
2533 if (MO->isImplicit())
2534 report("Explicit operand marked as implicit", MO, MONum);
2535 }
2536
2537 // Check that an instruction has register operands only as expected.
2538 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2539 !MO->isReg() && !MO->isFI())
2540 report("Expected a register operand.", MO, MONum);
2541 if (MO->isReg()) {
2542 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2543 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2544 !TII->isPCRelRegisterOperandLegal(*MO)))
2545 report("Expected a non-register operand.", MO, MONum);
2546 }
2547 }
2548
2549 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2550 if (TiedTo != -1) {
2551 if (!MO->isReg())
2552 report("Tied use must be a register", MO, MONum);
2553 else if (!MO->isTied())
2554 report("Operand should be tied", MO, MONum);
2555 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2556 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2557 else if (MO->getReg().isPhysical()) {
2558 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2559 if (!MOTied.isReg())
2560 report("Tied counterpart must be a register", &MOTied, TiedTo);
2561 else if (MOTied.getReg().isPhysical() &&
2562 MO->getReg() != MOTied.getReg())
2563 report("Tied physical registers must match.", &MOTied, TiedTo);
2564 }
2565 } else if (MO->isReg() && MO->isTied())
2566 report("Explicit operand should not be tied", MO, MONum);
2567 } else if (!MI->isVariadic()) {
2568 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2569 if (!MO->isValidExcessOperand())
2570 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2571 }
2572
2573 switch (MO->getType()) {
2574 case MachineOperand::MO_Register: {
2575 // Verify debug flag on debug instructions. Check this first because reg0
2576 // indicates an undefined debug value.
2577 if (MI->isDebugInstr() && MO->isUse()) {
2578 if (!MO->isDebug())
2579 report("Register operand must be marked debug", MO, MONum);
2580 } else if (MO->isDebug()) {
2581 report("Register operand must not be marked debug", MO, MONum);
2582 }
2583
2584 const Register Reg = MO->getReg();
2585 if (!Reg)
2586 return;
2587 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2588 checkLiveness(MO, MONum);
2589
2590 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2591 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2592 report("Undef virtual register def operands require a subregister", MO, MONum);
2593
2594 // Verify the consistency of tied operands.
2595 if (MO->isTied()) {
2596 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2597 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2598 if (!OtherMO.isReg())
2599 report("Must be tied to a register", MO, MONum);
2600 if (!OtherMO.isTied())
2601 report("Missing tie flags on tied operand", MO, MONum);
2602 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2603 report("Inconsistent tie links", MO, MONum);
2604 if (MONum < MCID.getNumDefs()) {
2605 if (OtherIdx < MCID.getNumOperands()) {
2606 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2607 report("Explicit def tied to explicit use without tie constraint",
2608 MO, MONum);
2609 } else {
2610 if (!OtherMO.isImplicit())
2611 report("Explicit def should be tied to implicit use", MO, MONum);
2612 }
2613 }
2614 }
2615
2616 // Verify two-address constraints after the TwoAddressInstruction pass.
2617 // Both the TwoAddressInstruction and PHI-node-elimination passes call
2618 // MRI->leaveSSA() to mark MF as no longer IsSSA, but the verification
2619 // should run after TwoAddressInstruction and not after PHI elimination.
2620 // So instead of keying on IsSSA, we verify the two-address constraints
2621 // based on the TiedOpsRewritten property, which is set by the
2622 // TwoAddressInstruction pass.
2623 unsigned DefIdx;
2624 if (MF->getProperties().hasTiedOpsRewritten() && MO->isUse() &&
2625 MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2626 Reg != MI->getOperand(DefIdx).getReg())
2627 report("Two-address instruction operands must be identical", MO, MONum);
2628
2629 // Check register classes.
2630 unsigned SubIdx = MO->getSubReg();
2631
2632 if (Reg.isPhysical()) {
2633 if (SubIdx) {
2634 report("Illegal subregister index for physical register", MO, MONum);
2635 return;
2636 }
2637 if (MONum < MCID.getNumOperands()) {
2638 if (const TargetRegisterClass *DRC =
2639 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2640 if (!DRC->contains(Reg)) {
2641 report("Illegal physical register for instruction", MO, MONum);
2642 OS << printReg(Reg, TRI) << " is not a "
2643 << TRI->getRegClassName(DRC) << " register.\n";
2644 }
2645 }
2646 }
2647 if (MO->isRenamable()) {
2648 if (MRI->isReserved(Reg)) {
2649 report("isRenamable set on reserved register", MO, MONum);
2650 return;
2651 }
2652 }
2653 } else {
2654 // Virtual register.
2655 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2656 if (!RC) {
2657 // This is a generic virtual register.
2658
2659 // Do not allow undef uses for generic virtual registers. This ensures
2660 // getVRegDef can never fail and return null on a generic register.
2661 //
2662 // FIXME: This restriction should probably be broadened to all SSA
2663 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2664 // run on the SSA function just before phi elimination.
2665 if (MO->isUndef())
2666 report("Generic virtual register use cannot be undef", MO, MONum);
2667
2668 // Debug value instruction is permitted to use undefined vregs.
2669 // This is a performance measure to skip the overhead of immediately
2670 // pruning unused debug operands. The final undef substitution occurs
2671 // when debug values are allocated in LDVImpl::handleDebugValue, so
2672 // these verifications always apply after this pass.
2673 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2674 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2675 // If we're post-Select, we can't have gvregs anymore.
2676 if (isFunctionSelected) {
2677 report("Generic virtual register invalid in a Selected function",
2678 MO, MONum);
2679 return;
2680 }
2681
2682 // The gvreg must have a type and it must not have a SubIdx.
2683 LLT Ty = MRI->getType(Reg);
2684 if (!Ty.isValid()) {
2685 report("Generic virtual register must have a valid type", MO,
2686 MONum);
2687 return;
2688 }
2689
2690 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2691 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2692
2693 // If we're post-RegBankSelect, the gvreg must have a bank.
2694 if (!RegBank && isFunctionRegBankSelected) {
2695 report("Generic virtual register must have a bank in a "
2696 "RegBankSelected function",
2697 MO, MONum);
2698 return;
2699 }
2700
2701 // Make sure the register fits into its register bank if any.
2702 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2703 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2704 report("Register bank is too small for virtual register", MO,
2705 MONum);
2706 OS << "Register bank " << RegBank->getName() << " too small("
2707 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2708 << Ty.getSizeInBits() << "-bits\n";
2709 return;
2710 }
2711 }
2712
2713 if (SubIdx) {
2714 report("Generic virtual register does not allow subregister index", MO,
2715 MONum);
2716 return;
2717 }
2718
2719 // If this is a target specific instruction and this operand
2720 // has register class constraint, the virtual register must
2721 // comply to it.
2722 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2723 MONum < MCID.getNumOperands() &&
2724 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2725 report("Virtual register does not match instruction constraint", MO,
2726 MONum);
2727 OS << "Expect register class "
2728 << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
2729 << " but got nothing\n";
2730 return;
2731 }
2732
2733 break;
2734 }
2735 if (SubIdx) {
2736 const TargetRegisterClass *SRC =
2737 TRI->getSubClassWithSubReg(RC, SubIdx);
2738 if (!SRC) {
2739 report("Invalid subregister index for virtual register", MO, MONum);
2740 OS << "Register class " << TRI->getRegClassName(RC)
2741 << " does not support subreg index "
2742 << TRI->getSubRegIndexName(SubIdx) << '\n';
2743 return;
2744 }
2745 if (RC != SRC) {
2746 report("Invalid register class for subregister index", MO, MONum);
2747 OS << "Register class " << TRI->getRegClassName(RC)
2748 << " does not fully support subreg index "
2749 << TRI->getSubRegIndexName(SubIdx) << '\n';
2750 return;
2751 }
2752 }
2753 if (MONum < MCID.getNumOperands()) {
2754 if (const TargetRegisterClass *DRC =
2755 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2756 if (SubIdx) {
2757 const TargetRegisterClass *SuperRC =
2758 TRI->getLargestLegalSuperClass(RC, *MF);
2759 if (!SuperRC) {
2760 report("No largest legal super class exists.", MO, MONum);
2761 return;
2762 }
2763 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2764 if (!DRC) {
2765 report("No matching super-reg register class.", MO, MONum);
2766 return;
2767 }
2768 }
2769 if (!RC->hasSuperClassEq(DRC)) {
2770 report("Illegal virtual register for instruction", MO, MONum);
2771 OS << "Expected a " << TRI->getRegClassName(DRC)
2772 << " register, but got a " << TRI->getRegClassName(RC)
2773 << " register\n";
2774 }
2775 }
2776 }
2777 }
2778 break;
2779 }
2780
2781 case MachineOperand::MO_RegisterMask:
2782 regMasks.push_back(MO->getRegMask());
2783 break;
2784
2785 case MachineOperand::MO_MachineBasicBlock:
2786 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2787 report("PHI operand is not in the CFG", MO, MONum);
2788 break;
2789
2790 case MachineOperand::MO_FrameIndex:
2791 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2792 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2793 int FI = MO->getIndex();
2794 LiveInterval &LI = LiveStks->getInterval(FI);
2795 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2796
2797 bool stores = MI->mayStore();
2798 bool loads = MI->mayLoad();
2799 // For a memory-to-memory move, we need to check if the frame
2800 // index is used for storing or loading, by inspecting the
2801 // memory operands.
2802 if (stores && loads) {
2803 for (auto *MMO : MI->memoperands()) {
2804 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2805 if (PSV == nullptr) continue;
2806 const FixedStackPseudoSourceValue *Value =
2807 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2808 if (Value == nullptr) continue;
2809 if (Value->getFrameIndex() != FI) continue;
2810
2811 if (MMO->isStore())
2812 loads = false;
2813 else
2814 stores = false;
2815 break;
2816 }
2817 if (loads == stores)
2818 report("Missing fixed stack memoperand.", MI);
2819 }
2820 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2821 report("Instruction loads from dead spill slot", MO, MONum);
2822 OS << "Live stack: " << LI << '\n';
2823 }
2824 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2825 report("Instruction stores to dead spill slot", MO, MONum);
2826 OS << "Live stack: " << LI << '\n';
2827 }
2828 }
2829 break;
2830
2831 case MachineOperand::MO_CFIIndex:
2832 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2833 report("CFI instruction has invalid index", MO, MONum);
2834 break;
2835
2836 default:
2837 break;
2838 }
2839}
2840
2841void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2842 unsigned MONum, SlotIndex UseIdx,
2843 const LiveRange &LR,
2844 VirtRegOrUnit VRegOrUnit,
2845 LaneBitmask LaneMask) {
2846 const MachineInstr *MI = MO->getParent();
2847
2848 if (!LR.verify()) {
2849 report("invalid live range", MO, MONum);
2850 report_context_liverange(LR);
2851 report_context_vreg_regunit(VRegOrUnit);
2852 report_context(UseIdx);
2853 return;
2854 }
2855
2856 LiveQueryResult LRQ = LR.Query(UseIdx);
2857 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2858 // Check if we have a segment at the use, note however that we only need one
2859 // live subregister range, the others may be dead.
2860 if (!HasValue && LaneMask.none()) {
2861 report("No live segment at use", MO, MONum);
2862 report_context_liverange(LR);
2863 report_context_vreg_regunit(VRegOrUnit);
2864 report_context(UseIdx);
2865 }
2866 if (MO->isKill() && !LRQ.isKill()) {
2867 report("Live range continues after kill flag", MO, MONum);
2868 report_context_liverange(LR);
2869 report_context_vreg_regunit(VRegOrUnit);
2870 if (LaneMask.any())
2871 report_context_lanemask(LaneMask);
2872 report_context(UseIdx);
2873 }
2874}
2875
2876void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2877 unsigned MONum, SlotIndex DefIdx,
2878 const LiveRange &LR,
2879 VirtRegOrUnit VRegOrUnit,
2880 bool SubRangeCheck,
2881 LaneBitmask LaneMask) {
2882 if (!LR.verify()) {
2883 report("invalid live range", MO, MONum);
2884 report_context_liverange(LR);
2885 report_context_vreg_regunit(VRegOrUnit);
2886 if (LaneMask.any())
2887 report_context_lanemask(LaneMask);
2888 report_context(DefIdx);
2889 }
2890
2891 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2892 // The LR can correspond to the whole reg, and its def slot is not obliged
2893 // to be the same as the MO's def slot. E.g. we may be checking a "normal"
2894 // subreg MO here while another early-clobber subreg MO in the same
2895 // instruction gives the whole reg an early-clobber def slot that differs
2896 // from the currently checked MO's def slot. For example:
2897 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2898 // The check that there is an early-clobber def of the same superregister
2899 // somewhere is performed in visitMachineFunctionAfter().
2900 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2901 !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2902 (VNI->def != DefIdx &&
2903 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2904 report("Inconsistent valno->def", MO, MONum);
2905 report_context_liverange(LR);
2906 report_context_vreg_regunit(VRegOrUnit);
2907 if (LaneMask.any())
2908 report_context_lanemask(LaneMask);
2909 report_context(*VNI);
2910 report_context(DefIdx);
2911 }
2912 } else {
2913 report("No live segment at def", MO, MONum);
2914 report_context_liverange(LR);
2915 report_context_vreg_regunit(VRegOrUnit);
2916 if (LaneMask.any())
2917 report_context_lanemask(LaneMask);
2918 report_context(DefIdx);
2919 }
2920 // Check that, if the dead def flag is present, LiveInts agree.
2921 if (MO->isDead()) {
2922 LiveQueryResult LRQ = LR.Query(DefIdx);
2923 if (!LRQ.isDeadDef()) {
2924 assert(VRegOrUnit.isVirtualReg() && "Expecting a virtual register.");
2925 // A dead subreg def only tells us that the specific subreg is dead. There
2926 // could be other non-dead defs of other subregs, or we could have other
2927 // parts of the register being live through the instruction. So unless we
2928 // are checking liveness for a subrange it is ok for the live range to
2929 // continue, given that we have a dead def of a subregister.
2930 if (SubRangeCheck || MO->getSubReg() == 0) {
2931 report("Live range continues after dead def flag", MO, MONum);
2932 report_context_liverange(LR);
2933 report_context_vreg_regunit(VRegOrUnit);
2934 if (LaneMask.any())
2935 report_context_lanemask(LaneMask);
2936 }
2937 }
2938 }
2939}
2940
2941void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2942 const MachineInstr *MI = MO->getParent();
2943 const Register Reg = MO->getReg();
2944 const unsigned SubRegIdx = MO->getSubReg();
2945
2946 const LiveInterval *LI = nullptr;
2947 if (LiveInts && Reg.isVirtual()) {
2948 if (LiveInts->hasInterval(Reg)) {
2949 LI = &LiveInts->getInterval(Reg);
2950 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2951 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2952 report("Live interval for subreg operand has no subranges", MO, MONum);
2953 } else {
2954 report("Virtual register has no live interval", MO, MONum);
2955 }
2956 }
2957
2958 // Both use and def operands can read a register.
2959 if (MO->readsReg()) {
2960 if (MO->isKill())
2961 addRegWithSubRegs(regsKilled, Reg);
2962
2963 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2964 // which case we have already checked that LiveVars knows any kills on the
2965 // bundle header instead).
2966 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2967 !MI->isBundledWithPred()) {
2968 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2969 if (!is_contained(VI.Kills, MI))
2970 report("Kill missing from LiveVariables", MO, MONum);
2971 }
2972
2973 // Check LiveInts liveness and kill.
2974 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2975 SlotIndex UseIdx;
2976 if (MI->isPHI()) {
2977 // PHI use occurs on the edge, so check for live out here instead.
2978 UseIdx = LiveInts->getMBBEndIdx(
2979 MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
2980 } else {
2981 UseIdx = LiveInts->getInstructionIndex(*MI);
2982 }
2983 // Check the cached regunit intervals.
2984 if (Reg.isPhysical() && !isReserved(Reg)) {
2985 for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2986 if (MRI->isReservedRegUnit(Unit))
2987 continue;
2988 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2989 checkLivenessAtUse(MO, MONum, UseIdx, *LR, VirtRegOrUnit(Unit));
2990 }
2991 }
2992
2993 if (Reg.isVirtual()) {
2994 // This is a virtual register interval.
2995 checkLivenessAtUse(MO, MONum, UseIdx, *LI, VirtRegOrUnit(Reg));
2996
2997 if (LI->hasSubRanges() && !MO->isDef()) {
2998 LaneBitmask MOMask = SubRegIdx != 0
2999 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3000 : MRI->getMaxLaneMaskForVReg(Reg);
3001 LaneBitmask LiveInMask;
3002 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3003 if ((MOMask & SR.LaneMask).none())
3004 continue;
3005 checkLivenessAtUse(MO, MONum, UseIdx, SR, VirtRegOrUnit(Reg),
3006 SR.LaneMask);
3007 LiveQueryResult LRQ = SR.Query(UseIdx);
3008 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
3009 LiveInMask |= SR.LaneMask;
3010 }
3011 // At least part of the register has to be live at the use.
3012 if ((LiveInMask & MOMask).none()) {
3013 report("No live subrange at use", MO, MONum);
3014 report_context(*LI);
3015 report_context(UseIdx);
3016 }
3017 // For PHIs all lanes should be live
3018 if (MI->isPHI() && LiveInMask != MOMask) {
3019 report("Not all lanes of PHI source live at use", MO, MONum);
3020 report_context(*LI);
3021 report_context(UseIdx);
3022 }
3023 }
3024 }
3025 }
3026
3027 // Use of a dead register.
3028 if (!regsLive.count(Reg)) {
3029 if (Reg.isPhysical()) {
3030 // Reserved registers may be used even when 'dead'.
3031 bool Bad = !isReserved(Reg);
3032 // We are fine if just any subregister has a defined value.
3033 if (Bad) {
3034
3035 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
3036 if (regsLive.count(SubReg)) {
3037 Bad = false;
3038 break;
3039 }
3040 }
3041 }
3042 // If there is an additional implicit-use of a super register we stop
3043 // here. By definition we are fine if the super register is not
3044 // (completely) dead; if the complete super register is dead we will
3045 // get a report for its operand.
3046 if (Bad) {
3047 for (const MachineOperand &MOP : MI->uses()) {
3048 if (!MOP.isReg() || !MOP.isImplicit())
3049 continue;
3050
3051 if (!MOP.getReg().isPhysical())
3052 continue;
3053
3054 if (MOP.getReg() != Reg &&
3055 all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
3056 return llvm::is_contained(TRI->regunits(MOP.getReg()),
3057 RegUnit);
3058 }))
3059 Bad = false;
3060 }
3061 }
3062 if (Bad)
3063 report("Using an undefined physical register", MO, MONum);
3064 } else if (MRI->def_empty(Reg)) {
3065 report("Reading virtual register without a def", MO, MONum);
3066 } else {
3067 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3068 // We don't know which virtual registers are live in, so only complain
3069 // if vreg was killed in this MBB. Otherwise keep track of vregs that
3070 // must be live in. PHI instructions are handled separately.
3071 if (MInfo.regsKilled.count(Reg))
3072 report("Using a killed virtual register", MO, MONum);
3073 else if (!MI->isPHI())
3074 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
3075 }
3076 }
3077 }
3078
3079 if (MO->isDef()) {
3080 // Register defined.
3081 // TODO: verify that earlyclobber ops are not used.
3082 if (MO->isDead())
3083 addRegWithSubRegs(regsDead, Reg);
3084 else
3085 addRegWithSubRegs(regsDefined, Reg);
3086
3087 // Verify SSA form.
3088 if (MRI->isSSA() && Reg.isVirtual() &&
3089 std::next(MRI->def_begin(Reg)) != MRI->def_end())
3090 report("Multiple virtual register defs in SSA form", MO, MONum);
3091
3092 // Check LiveInts for a live segment, but only for virtual registers.
3093 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
3094 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
3095 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
3096
3097 if (Reg.isVirtual()) {
3098 checkLivenessAtDef(MO, MONum, DefIdx, *LI, VirtRegOrUnit(Reg));
3099
3100 if (LI->hasSubRanges()) {
3101 LaneBitmask MOMask = SubRegIdx != 0
3102 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
3103 : MRI->getMaxLaneMaskForVReg(Reg);
3104 for (const LiveInterval::SubRange &SR : LI->subranges()) {
3105 if ((SR.LaneMask & MOMask).none())
3106 continue;
3107 checkLivenessAtDef(MO, MONum, DefIdx, SR, VirtRegOrUnit(Reg), true,
3108 SR.LaneMask);
3109 }
3110 }
3111 }
3112 }
3113 }
3114}
3115
3116// This function gets called after visiting all instructions in a bundle. The
3117// argument points to the bundle header.
3118// Normal stand-alone instructions are also considered 'bundles', and this
3119// function is called for all of them.
3120void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3121 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3122 set_union(MInfo.regsKilled, regsKilled);
3123 set_subtract(regsLive, regsKilled); regsKilled.clear();
3124 // Kill any masked registers.
3125 while (!regMasks.empty()) {
3126 const uint32_t *Mask = regMasks.pop_back_val();
3127 for (Register Reg : regsLive)
3128 if (Reg.isPhysical() &&
3129 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3130 regsDead.push_back(Reg);
3131 }
3132 set_subtract(regsLive, regsDead); regsDead.clear();
3133 set_union(regsLive, regsDefined); regsDefined.clear();
3134}
3135
3136void
3137MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3138 MBBInfoMap[MBB].regsLiveOut = regsLive;
3139 regsLive.clear();
3140
3141 if (Indexes) {
3142 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3143 if (!(stop > lastIndex)) {
3144 report("Block ends before last instruction index", MBB);
3145 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3146 << '\n';
3147 }
3148 lastIndex = stop;
3149 }
3150}
3151
3152namespace {
3153 // This implements a set of registers that serves as a filter: it can filter
3154 // other sets by passing through elements not in the filter and blocking those
3155 // that are. Any filter implicitly includes the full set of physical registers
3156 // upon creation, thus filtering them all out. The filter itself, as a set,
3157 // only grows, and needs to be as efficient as possible.
3158struct VRegFilter {
3159 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3160 // no duplicates. Both virtual and physical registers are fine.
3161 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3162 SmallVector<Register, 0> VRegsBuffer;
3163 filterAndAdd(FromRegSet, VRegsBuffer);
3164 }
3165 // Filter \p FromRegSet through the filter and append passed elements into \p
3166 // ToVRegs. All elements appended are then added to the filter itself.
3167 // \returns true if anything changed.
3168 template <typename RegSetT>
3169 bool filterAndAdd(const RegSetT &FromRegSet,
3170 SmallVectorImpl<Register> &ToVRegs) {
3171 unsigned SparseUniverse = Sparse.size();
3172 unsigned NewSparseUniverse = SparseUniverse;
3173 unsigned NewDenseSize = Dense.size();
3174 size_t Begin = ToVRegs.size();
3175 for (Register Reg : FromRegSet) {
3176 if (!Reg.isVirtual())
3177 continue;
3178 unsigned Index = Reg.virtRegIndex();
3179 if (Index < SparseUniverseMax) {
3180 if (Index < SparseUniverse && Sparse.test(Index))
3181 continue;
3182 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3183 } else {
3184 if (Dense.count(Reg))
3185 continue;
3186 ++NewDenseSize;
3187 }
3188 ToVRegs.push_back(Reg);
3189 }
3190 size_t End = ToVRegs.size();
3191 if (Begin == End)
3192 return false;
3193 // Reserving space in sets once performs better than doing so continuously
3194 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3195 // tuned all the way down) and double iteration (the second one is over a
3196 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3197 Sparse.resize(NewSparseUniverse);
3198 Dense.reserve(NewDenseSize);
3199 for (unsigned I = Begin; I < End; ++I) {
3200 Register Reg = ToVRegs[I];
3201 unsigned Index = Reg.virtRegIndex();
3202 if (Index < SparseUniverseMax)
3203 Sparse.set(Index);
3204 else
3205 Dense.insert(Reg);
3206 }
3207 return true;
3208 }
3209
3210private:
3211 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3212 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
3213 // are tracked by Dense. The only purpose of the threshold and the Dense set
3214 // is to have a reasonably growing memory usage in pathological cases (large
3215 // number of very sparse VRegFilter instances live at the same time). In
3216 // practice even in the worst-by-execution-time cases having all elements
3217 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3218 // space efficient than if tracked by Dense. The threshold is set to keep the
3219 // worst-case memory usage within 2x of figures determined empirically for
3220 // "all Dense" scenario in such worst-by-execution-time cases.
3221 BitVector Sparse;
3222 DenseSet<Register> Dense;
3223 };
3224
3225// Implements both a transfer function and a (binary, in-place) join operator
3226// for a dataflow over register sets with set union join and filtering transfer
3227 // (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time.
3228// Maintains out_b as its state, allowing for O(n) iteration over it at any
3229// time, where n is the size of the set (as opposed to O(U) where U is the
3230// universe). filter_b implicitly contains all physical registers at all times.
3231class FilteringVRegSet {
3232 VRegFilter Filter;
3233 SmallVector<Register, 0> VRegs;
3234
3235public:
3236 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3237 // Both virtual and physical registers are fine.
3238 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3239 Filter.add(RS);
3240 }
3241 // Passes \p RS through the filter_b (transfer function) and adds what's left
3242 // to itself (out_b).
3243 template <typename RegSetT> bool add(const RegSetT &RS) {
3244 // Double-duty the Filter: to keep VRegs a set (and the join operation a set
3245 // union), just add everything being added here to the Filter as well.
3246 return Filter.filterAndAdd(RS, VRegs);
3247 }
3248 using const_iterator = decltype(VRegs)::const_iterator;
3249 const_iterator begin() const { return VRegs.begin(); }
3250 const_iterator end() const { return VRegs.end(); }
3251 size_t size() const { return VRegs.size(); }
3252};
3253} // namespace
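The transfer/join contract of FilteringVRegSet, restated as a self-contained sketch with invented names and std containers: anything pre-seeded into the filter never reaches the output, and each surviving element is appended exactly once, so repeated add() calls behave as a set union.

#include <unordered_set>
#include <vector>

// Illustrative stand-in for FilteringVRegSet.
class FilteringSet {
  std::unordered_set<unsigned> Filter; // filter_b, plus everything emitted
  std::vector<unsigned> Out;           // out_b, iterable in O(n)

public:
  void addToFilter(unsigned V) { Filter.insert(V); }

  // Transfer function: keep only elements not yet filtered. Inserting kept
  // elements back into Filter both deduplicates Out and makes repeated add()
  // calls compute a set union.
  void add(const std::vector<unsigned> &In) {
    for (unsigned V : In)
      if (Filter.insert(V).second)
        Out.push_back(V);
  }

  const std::vector<unsigned> &result() const { return Out; }
};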
3254
3255// Calculate the largest possible vregsPassed sets. These are the registers that
3256// can pass through an MBB live, but may not be live every time. It is assumed
3257// that all vregsPassed sets are empty before the call.
3258void MachineVerifier::calcRegsPassed() {
3259 if (MF->empty())
3260 // ReversePostOrderTraversal doesn't handle empty functions.
3261 return;
3262
3263 for (const MachineBasicBlock *MB :
3264 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
3265 FilteringVRegSet VRegs;
3266 BBInfo &Info = MBBInfoMap[MB];
3267 assert(Info.reachable);
3268
3269 VRegs.addToFilter(Info.regsKilled);
3270 VRegs.addToFilter(Info.regsLiveOut);
3271 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3272 const BBInfo &PredInfo = MBBInfoMap[Pred];
3273 if (!PredInfo.reachable)
3274 continue;
3275
3276 VRegs.add(PredInfo.regsLiveOut);
3277 VRegs.add(PredInfo.vregsPassed);
3278 }
3279 Info.vregsPassed.reserve(VRegs.size());
3280 Info.vregsPassed.insert_range(VRegs);
3281 }
3282}
3283
3284// Calculate the set of virtual registers that must be passed through each basic
3285// block in order to satisfy the requirements of successor blocks. This is very
3286// similar to calcRegsPassed, only backwards.
3287void MachineVerifier::calcRegsRequired() {
3288 // First push live-in regs to predecessors' vregsRequired.
3289 SmallPtrSet<const MachineBasicBlock *, 8> todo;
3290 for (const auto &MBB : *MF) {
3291 BBInfo &MInfo = MBBInfoMap[&MBB];
3292 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3293 BBInfo &PInfo = MBBInfoMap[Pred];
3294 if (PInfo.addRequired(MInfo.vregsLiveIn))
3295 todo.insert(Pred);
3296 }
3297
3298 // Handle the PHI node.
3299 for (const MachineInstr &MI : MBB.phis()) {
3300 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3301 // Skip operands that are not registers or that do not read the register (e.g. undef uses).
3302 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3303 continue;
3304
3305 // Get register and predecessor for one PHI edge.
3306 Register Reg = MI.getOperand(i).getReg();
3307 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3308
3309 BBInfo &PInfo = MBBInfoMap[Pred];
3310 if (PInfo.addRequired(Reg))
3311 todo.insert(Pred);
3312 }
3313 }
3314 }
3315
3316 // Iteratively push vregsRequired to predecessors. This will converge to the
3317 // same final state regardless of DenseSet iteration order.
3318 while (!todo.empty()) {
3319 const MachineBasicBlock *MBB = *todo.begin();
3320 todo.erase(MBB);
3321 BBInfo &MInfo = MBBInfoMap[MBB];
3322 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3323 if (Pred == MBB)
3324 continue;
3325 BBInfo &SInfo = MBBInfoMap[Pred];
3326 if (SInfo.addRequired(MInfo.vregsRequired))
3327 todo.insert(Pred);
3328 }
3329 }
3330}
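The same worklist-to-fixpoint pattern in standalone form, with an invented node type in place of BBInfo. As in the loop above, a predecessor is re-queued only when its set actually changed, and termination follows because the sets only ever grow.

#include <set>
#include <vector>

// Invented node type standing in for BBInfo; Required plays vregsRequired.
struct Node {
  std::vector<Node *> Preds;
  std::set<unsigned> Required;
};

// Returns true if anything was added (mirrors BBInfo::addRequired above).
static bool addRequired(Node &N, const std::set<unsigned> &S) {
  bool Changed = false;
  for (unsigned V : S)
    Changed |= N.Required.insert(V).second;
  return Changed;
}

// Drain the worklist, re-queueing a predecessor only when its set grew; the
// sets only grow, so this reaches a fixpoint and terminates.
static void propagate(std::vector<Node *> Worklist) {
  while (!Worklist.empty()) {
    Node *N = Worklist.back();
    Worklist.pop_back();
    for (Node *P : N->Preds)
      if (P != N && addRequired(*P, N->Required))
        Worklist.push_back(P);
  }
}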
3331
3332// Check PHI instructions at the beginning of MBB. It is assumed that
3333// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3334void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3335 BBInfo &MInfo = MBBInfoMap[&MBB];
3336
3337 SmallPtrSet<const MachineBasicBlock *, 8> seen;
3338 for (const MachineInstr &Phi : MBB) {
3339 if (!Phi.isPHI())
3340 break;
3341 seen.clear();
3342
3343 const MachineOperand &MODef = Phi.getOperand(0);
3344 if (!MODef.isReg() || !MODef.isDef()) {
3345 report("Expected first PHI operand to be a register def", &MODef, 0);
3346 continue;
3347 }
3348 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3349 MODef.isEarlyClobber() || MODef.isDebug())
3350 report("Unexpected flag on PHI operand", &MODef, 0);
3351 Register DefReg = MODef.getReg();
3352 if (!DefReg.isVirtual())
3353 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3354
3355 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3356 const MachineOperand &MO0 = Phi.getOperand(I);
3357 if (!MO0.isReg()) {
3358 report("Expected PHI operand to be a register", &MO0, I);
3359 continue;
3360 }
3361 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3362 MO0.isDebug() || MO0.isTied())
3363 report("Unexpected flag on PHI operand", &MO0, I);
3364
3365 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3366 if (!MO1.isMBB()) {
3367 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3368 continue;
3369 }
3370
3371 const MachineBasicBlock &Pre = *MO1.getMBB();
3372 if (!Pre.isSuccessor(&MBB)) {
3373 report("PHI input is not a predecessor block", &MO1, I + 1);
3374 continue;
3375 }
3376
3377 if (MInfo.reachable) {
3378 seen.insert(&Pre);
3379 BBInfo &PrInfo = MBBInfoMap[&Pre];
3380 if (!MO0.isUndef() && PrInfo.reachable &&
3381 !PrInfo.isLiveOut(MO0.getReg()))
3382 report("PHI operand is not live-out from predecessor", &MO0, I);
3383 }
3384 }
3385
3386 // Did we see all predecessors?
3387 if (MInfo.reachable) {
3388 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3389 if (!seen.count(Pred)) {
3390 report("Missing PHI operand", &Phi);
3391 OS << printMBBReference(*Pred)
3392 << " is a predecessor according to the CFG.\n";
3393 }
3394 }
3395 }
3396 }
3397}
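The operand layout those loops rely on, as a standalone shape check with a made-up operand-kind enum: a PHI is one register def followed by (value, predecessor-block) pairs, which is why the iteration above steps by two starting at index 1.

#include <cstddef>
#include <vector>

enum class OpKind { RegDef, RegUse, Block }; // illustrative only

// Returns true if Ops has the def-then-(use, block)-pairs shape of a PHI.
static bool hasPHIShape(const std::vector<OpKind> &Ops) {
  // Operand 0 is the def; the remaining count must pair up, so it is even
  // and the total operand count is odd.
  if (Ops.empty() || Ops[0] != OpKind::RegDef || Ops.size() % 2 == 0)
    return false;
  for (std::size_t I = 1; I + 1 < Ops.size(); I += 2)
    if (Ops[I] != OpKind::RegUse || Ops[I + 1] != OpKind::Block)
      return false;
  return true;
}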
3398
3399static void
3400verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT,
3401 std::function<void(const Twine &Message)> FailureCB,
3402 raw_ostream &OS) {
3403 MachineConvergenceVerifier CV;
3404 CV.initialize(&OS, FailureCB, MF);
3405
3406 for (const auto &MBB : MF) {
3407 CV.visit(MBB);
3408 for (const auto &MI : MBB.instrs())
3409 CV.visit(MI);
3410 }
3411
3412 if (CV.sawTokens()) {
3413 DT.recalculate(const_cast<MachineFunction &>(MF));
3414 CV.verify(DT);
3415 }
3416}
3417
3418void MachineVerifier::visitMachineFunctionAfter() {
3419 auto FailureCB = [this](const Twine &Message) {
3420 report(Message.str().c_str(), MF);
3421 };
3422 verifyConvergenceControl(*MF, DT, FailureCB, OS);
3423
3424 calcRegsPassed();
3425
3426 for (const MachineBasicBlock &MBB : *MF)
3427 checkPHIOps(MBB);
3428
3429 // Now check liveness info if available
3430 calcRegsRequired();
3431
3432 // Check for killed virtual registers that should be live out.
3433 for (const auto &MBB : *MF) {
3434 BBInfo &MInfo = MBBInfoMap[&MBB];
3435 for (Register VReg : MInfo.vregsRequired)
3436 if (MInfo.regsKilled.count(VReg)) {
3437 report("Virtual register killed in block, but needed live out.", &MBB);
3438 OS << "Virtual register " << printReg(VReg)
3439 << " is used after the block.\n";
3440 }
3441 }
3442
3443 if (!MF->empty()) {
3444 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3445 for (Register VReg : MInfo.vregsRequired) {
3446 report("Virtual register defs don't dominate all uses.", MF);
3447 report_context_vreg(VReg);
3448 }
3449 }
3450
3451 if (LiveVars)
3452 verifyLiveVariables();
3453 if (LiveInts)
3454 verifyLiveIntervals();
3455
3456 // Check live-in list of each MBB. If a register is live into MBB, check
3457 // that the register is in regsLiveOut of each predecessor block. Since
3458 // this must come from a definition in the predecessor or its live-in
3459 // list, this will catch a live-through case where the predecessor does not
3460 // have the register in its live-in list. This currently only checks
3461 // registers that have no aliases, are not allocatable and are not
3462 // reserved, which could mean a condition code register for instance.
3463 if (MRI->tracksLiveness())
3464 for (const auto &MBB : *MF)
3465 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
3466 MCRegister LiveInReg = P.PhysReg;
3467 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3468 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3469 continue;
3470 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3471 BBInfo &PInfo = MBBInfoMap[Pred];
3472 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3473 report("Live in register not found to be live out from predecessor.",
3474 &MBB);
3475 OS << TRI->getName(LiveInReg) << " not found to be live out from "
3476 << printMBBReference(*Pred) << '\n';
3477 }
3478 }
3479 }
3480
3481 for (auto CSInfo : MF->getCallSitesInfo())
3482 if (!CSInfo.first->isCall())
3483 report("Call site info referencing instruction that is not call", MF);
3484
3485 // If there's debug-info, check that we don't have any duplicate value
3486 // tracking numbers.
3487 if (MF->getFunction().getSubprogram()) {
3488 DenseSet<unsigned> SeenNumbers;
3489 for (const auto &MBB : *MF) {
3490 for (const auto &MI : MBB) {
3491 if (auto Num = MI.peekDebugInstrNum()) {
3492 auto Result = SeenNumbers.insert((unsigned)Num);
3493 if (!Result.second)
3494 report("Instruction has a duplicated value tracking number", &MI);
3495 }
3496 }
3497 }
3498 }
3499}
3500
3501void MachineVerifier::verifyLiveVariables() {
3502 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3503 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3504 Register Reg = Register::index2VirtReg(I);
3505 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3506 for (const auto &MBB : *MF) {
3507 BBInfo &MInfo = MBBInfoMap[&MBB];
3508
3509 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3510 if (MInfo.vregsRequired.count(Reg)) {
3511 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3512 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3513 OS << "Virtual register " << printReg(Reg)
3514 << " must be live through the block.\n";
3515 }
3516 } else {
3517 if (VI.AliveBlocks.test(MBB.getNumber())) {
3518 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3519 OS << "Virtual register " << printReg(Reg)
3520 << " is not needed live through the block.\n";
3521 }
3522 }
3523 }
3524 }
3525}
3526
3527void MachineVerifier::verifyLiveIntervals() {
3528 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3529 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3530 Register Reg = Register::index2VirtReg(I);
3531
3532 // Spilling and splitting may leave unused registers around. Skip them.
3533 if (MRI->reg_nodbg_empty(Reg))
3534 continue;
3535
3536 if (!LiveInts->hasInterval(Reg)) {
3537 report("Missing live interval for virtual register", MF);
3538 OS << printReg(Reg, TRI) << " still has defs or uses\n";
3539 continue;
3540 }
3541
3542 const LiveInterval &LI = LiveInts->getInterval(Reg);
3543 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3544 verifyLiveInterval(LI);
3545 }
3546
3547 // Verify all the cached regunit intervals.
3548 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3549 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3550 verifyLiveRange(*LR, VirtRegOrUnit(i));
3551}
3552
3553void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3554 const VNInfo *VNI,
3555 VirtRegOrUnit VRegOrUnit,
3556 LaneBitmask LaneMask) {
3557 if (VNI->isUnused())
3558 return;
3559
3560 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3561
3562 if (!DefVNI) {
3563 report("Value not live at VNInfo def and not marked unused", MF);
3564 report_context(LR, VRegOrUnit, LaneMask);
3565 report_context(*VNI);
3566 return;
3567 }
3568
3569 if (DefVNI != VNI) {
3570 report("Live segment at def has different VNInfo", MF);
3571 report_context(LR, VRegOrUnit, LaneMask);
3572 report_context(*VNI);
3573 return;
3574 }
3575
3576 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3577 if (!MBB) {
3578 report("Invalid VNInfo definition index", MF);
3579 report_context(LR, VRegOrUnit, LaneMask);
3580 report_context(*VNI);
3581 return;
3582 }
3583
3584 if (VNI->isPHIDef()) {
3585 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3586 report("PHIDef VNInfo is not defined at MBB start", MBB);
3587 report_context(LR, VRegOrUnit, LaneMask);
3588 report_context(*VNI);
3589 }
3590 return;
3591 }
3592
3593 // Non-PHI def.
3594 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3595 if (!MI) {
3596 report("No instruction at VNInfo def index", MBB);
3597 report_context(LR, VRegOrUnit, LaneMask);
3598 report_context(*VNI);
3599 return;
3600 }
3601
3602 bool hasDef = false;
3603 bool isEarlyClobber = false;
3604 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3605 if (!MOI->isReg() || !MOI->isDef())
3606 continue;
3607 if (VRegOrUnit.isVirtualReg()) {
3608 if (MOI->getReg() != VRegOrUnit.asVirtualReg())
3609 continue;
3610 } else {
3611 if (!MOI->getReg().isPhysical() ||
3612 !TRI->hasRegUnit(MOI->getReg(), VRegOrUnit.asMCRegUnit()))
3613 continue;
3614 }
3615 if (LaneMask.any() &&
3616 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3617 continue;
3618 hasDef = true;
3619 if (MOI->isEarlyClobber())
3620 isEarlyClobber = true;
3621 }
3622
3623 if (!hasDef) {
3624 report("Defining instruction does not modify register", MI);
3625 report_context(LR, VRegOrUnit, LaneMask);
3626 report_context(*VNI);
3627 }
3628
3629 // Early clobber defs begin at USE slots, but other defs must begin at
3630 // DEF slots.
3631 if (isEarlyClobber) {
3632 if (!VNI->def.isEarlyClobber()) {
3633 report("Early clobber def must be at an early-clobber slot", MBB);
3634 report_context(LR, VRegOrUnit, LaneMask);
3635 report_context(*VNI);
3636 }
3637 } else if (!VNI->def.isRegister()) {
3638 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3639 report_context(LR, VRegOrUnit, LaneMask);
3640 report_context(*VNI);
3641 }
3642}
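The def-placement rule enforced above, restated in standalone form. SlotIndexes gives each instruction position four slot kinds in order (block, early-clobber, register, dead); the enum below is a made-up stand-in for that ordering.

enum class SlotKind { Block, EarlyClobber, Register, Dead }; // illustrative

// Where a value's definition may legally sit, per the checks above.
static bool isValidDefSlot(bool IsPHIDef, bool IsEarlyClobberDef, SlotKind K) {
  if (IsPHIDef)
    return K == SlotKind::Block;        // PHI values live from the block start
  if (IsEarlyClobberDef)
    return K == SlotKind::EarlyClobber; // EC defs begin at the early slot
  return K == SlotKind::Register;       // ordinary defs begin at the reg slot
}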
3643
3644void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3645 const LiveRange::const_iterator I,
3646 VirtRegOrUnit VRegOrUnit,
3647 LaneBitmask LaneMask) {
3648 const LiveRange::Segment &S = *I;
3649 const VNInfo *VNI = S.valno;
3650 assert(VNI && "Live segment has no valno");
3651
3652 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3653 report("Foreign valno in live segment", MF);
3654 report_context(LR, VRegOrUnit, LaneMask);
3655 report_context(S);
3656 report_context(*VNI);
3657 }
3658
3659 if (VNI->isUnused()) {
3660 report("Live segment valno is marked unused", MF);
3661 report_context(LR, VRegOrUnit, LaneMask);
3662 report_context(S);
3663 }
3664
3665 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3666 if (!MBB) {
3667 report("Bad start of live segment, no basic block", MF);
3668 report_context(LR, VRegOrUnit, LaneMask);
3669 report_context(S);
3670 return;
3671 }
3672 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3673 if (S.start != MBBStartIdx && S.start != VNI->def) {
3674 report("Live segment must begin at MBB entry or valno def", MBB);
3675 report_context(LR, VRegOrUnit, LaneMask);
3676 report_context(S);
3677 }
3678
3679 const MachineBasicBlock *EndMBB =
3680 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3681 if (!EndMBB) {
3682 report("Bad end of live segment, no basic block", MF);
3683 report_context(LR, VRegOrUnit, LaneMask);
3684 report_context(S);
3685 return;
3686 }
3687
3688 // Checks for non-live-out segments.
3689 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3690 // RegUnit intervals are allowed to have dead PHI defs.
3691 if (!VRegOrUnit.isVirtualReg() && VNI->isPHIDef() && S.start == VNI->def &&
3692 S.end == VNI->def.getDeadSlot())
3693 return;
3694
3695 // The live segment is ending inside EndMBB
3696 const MachineInstr *MI =
3697 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3698 if (!MI) {
3699 report("Live segment doesn't end at a valid instruction", EndMBB);
3700 report_context(LR, VRegOrUnit, LaneMask);
3701 report_context(S);
3702 return;
3703 }
3704
3705 // The block slot must refer to a basic block boundary.
3706 if (S.end.isBlock()) {
3707 report("Live segment ends at B slot of an instruction", EndMBB);
3708 report_context(LR, VRegOrUnit, LaneMask);
3709 report_context(S);
3710 }
3711
3712 if (S.end.isDead()) {
3713 // Segment ends on the dead slot.
3714 // That means there must be a dead def.
3715 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3716 report("Live segment ending at dead slot spans instructions", EndMBB);
3717 report_context(LR, VRegOrUnit, LaneMask);
3718 report_context(S);
3719 }
3720 }
3721
3722 // After tied operands are rewritten, a live segment can only end at an
3723 // early-clobber slot if it is being redefined by an early-clobber def.
3724 // TODO: Before tied operands are rewritten, a live segment can only end at
3725 // an early-clobber slot if the last use is tied to an early-clobber def.
3726 if (MF->getProperties().hasTiedOpsRewritten() && S.end.isEarlyClobber()) {
3727 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3728 report("Live segment ending at early clobber slot must be "
3729 "redefined by an EC def in the same instruction",
3730 EndMBB);
3731 report_context(LR, VRegOrUnit, LaneMask);
3732 report_context(S);
3733 }
3734 }
3735
3736 // The following checks only apply to virtual registers. Physreg liveness
3737 // is too weird to check.
3738 if (VRegOrUnit.isVirtualReg()) {
3739 // A live segment can end with either a redefinition, a kill flag on a
3740 // use, or a dead flag on a def.
3741 bool hasRead = false;
3742 bool hasSubRegDef = false;
3743 bool hasDeadDef = false;
3744 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3745 if (!MOI->isReg() || MOI->getReg() != VRegOrUnit.asVirtualReg())
3746 continue;
3747 unsigned Sub = MOI->getSubReg();
3748 LaneBitmask SLM =
3749 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3750 if (MOI->isDef()) {
3751 if (Sub != 0) {
3752 hasSubRegDef = true;
3753 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3754 // mask for subregister defs. Read-undef defs will be handled by
3755 // readsReg below.
3756 SLM = ~SLM;
3757 }
3758 if (MOI->isDead())
3759 hasDeadDef = true;
3760 }
3761 if (LaneMask.any() && (LaneMask & SLM).none())
3762 continue;
3763 if (MOI->readsReg())
3764 hasRead = true;
3765 }
3766 if (S.end.isDead()) {
3767 // Make sure that the corresponding machine operand for a "dead" live
3768 // range has the dead flag. We cannot perform this check for subregister
3769 // live ranges, as partially dead values are allowed.
3770 if (LaneMask.none() && !hasDeadDef) {
3771 report(
3772 "Instruction ending live segment on dead slot has no dead flag",
3773 MI);
3774 report_context(LR, VRegOrUnit, LaneMask);
3775 report_context(S);
3776 }
3777 } else {
3778 if (!hasRead) {
3779 // When tracking subregister liveness, the main range must start new
3780 // values on partial register writes, even if there is no read.
3781 if (!MRI->shouldTrackSubRegLiveness(VRegOrUnit.asVirtualReg()) ||
3782 LaneMask.any() || !hasSubRegDef) {
3783 report("Instruction ending live segment doesn't read the register",
3784 MI);
3785 report_context(LR, VRegOrUnit, LaneMask);
3786 report_context(S);
3787 }
3788 }
3789 }
3790 }
3791 }
3792
3793 // Now check all the basic blocks in this live segment.
3794 MachineFunction::const_iterator MFI = MBB->getIterator();
3795 // Is this live segment the beginning of a non-PHIDef VN?
3796 if (S.start == VNI->def && !VNI->isPHIDef()) {
3797 // Not live-in to any blocks.
3798 if (MBB == EndMBB)
3799 return;
3800 // Skip this block.
3801 ++MFI;
3802 }
3803
3803
3804 SmallVector<SlotIndex, 4> Undefs;
3805 if (LaneMask.any()) {
3806 LiveInterval &OwnerLI = LiveInts->getInterval(VRegOrUnit.asVirtualReg());
3807 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3808 }
3809
3810 while (true) {
3811 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3812 // We don't know how to track physregs into a landing pad.
3813 if (!VRegOrUnit.isVirtualReg() && MFI->isEHPad()) {
3814 if (&*MFI == EndMBB)
3815 break;
3816 ++MFI;
3817 continue;
3818 }
3819
3820 // Is VNI a PHI-def in the current block?
3821 bool IsPHI = VNI->isPHIDef() &&
3822 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3823
3824 // Check that VNI is live-out of all predecessors.
3825 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3826 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3827 // For a landing pad, the predecessor's live-out value is taken at its last call.
3828 if (MFI->isEHPad()) {
3829 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3830 if (MI.isCall()) {
3831 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3832 break;
3833 }
3834 }
3835 }
3836 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3837
3838 // All predecessors must have a live-out value. However for a phi
3839 // instruction with subregister intervals
3840 // only one of the subregisters (not necessarily the current one) needs to
3841 // be defined.
3842 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3843 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3844 continue;
3845 report("Register not marked live out of predecessor", Pred);
3846 report_context(LR, VRegOrUnit, LaneMask);
3847 report_context(*VNI);
3848 OS << " live into " << printMBBReference(*MFI) << '@'
3849 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3850 << '\n';
3851 continue;
3852 }
3853
3854 // Only PHI-defs can take different predecessor values.
3855 if (!IsPHI && PVNI != VNI) {
3856 report("Different value live out of predecessor", Pred);
3857 report_context(LR, VRegOrUnit, LaneMask);
3858 OS << "Valno #" << PVNI->id << " live out of "
3859 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3860 << " live into " << printMBBReference(*MFI) << '@'
3861 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3862 }
3863 }
3864 if (&*MFI == EndMBB)
3865 break;
3866 ++MFI;
3867 }
3868}
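The boundary rule from the start of this function, in standalone form: a segment must be non-empty and must begin either at its value's definition or at the entry of its containing block. SlotIndex is modeled as a plain integer here for illustration.

// Illustrative segment: half-open [Start, End) plus its valno's def point.
struct Seg {
  unsigned Start, End, ValNoDef;
};

static bool segmentStartOK(const Seg &S, unsigned MBBStart) {
  return S.Start < S.End && (S.Start == S.ValNoDef || S.Start == MBBStart);
}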
3869
3870void MachineVerifier::verifyLiveRange(const LiveRange &LR,
3871 VirtRegOrUnit VRegOrUnit,
3872 LaneBitmask LaneMask) {
3873 for (const VNInfo *VNI : LR.valnos)
3874 verifyLiveRangeValue(LR, VNI, VRegOrUnit, LaneMask);
3875
3876 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3877 verifyLiveRangeSegment(LR, I, VRegOrUnit, LaneMask);
3878}
3879
3880void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3881 Register Reg = LI.reg();
3882 assert(Reg.isVirtual());
3883 verifyLiveRange(LI, VirtRegOrUnit(Reg));
3884
3885 if (LI.hasSubRanges()) {
3886 LaneBitmask Mask;
3887 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3888 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3889 if ((Mask & SR.LaneMask).any()) {
3890 report("Lane masks of sub ranges overlap in live interval", MF);
3891 report_context(LI);
3892 }
3893 if ((SR.LaneMask & ~MaxMask).any()) {
3894 report("Subrange lanemask is invalid", MF);
3895 report_context(LI);
3896 }
3897 if (SR.empty()) {
3898 report("Subrange must not be empty", MF);
3899 report_context(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3900 }
3901 Mask |= SR.LaneMask;
3902 verifyLiveRange(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3903 if (!LI.covers(SR)) {
3904 report("A Subrange is not covered by the main range", MF);
3905 report_context(LI);
3906 }
3907 }
3908 }
3909
3910 // Check the LI only has one connected component.
3911 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3912 unsigned NumComp = ConEQ.Classify(LI);
3913 if (NumComp > 1) {
3914 report("Multiple connected components in live interval", MF);
3915 report_context(LI);
3916 for (unsigned comp = 0; comp != NumComp; ++comp) {
3917 OS << comp << ": valnos";
3918 for (const VNInfo *I : LI.valnos)
3919 if (comp == ConEQ.getEqClass(I))
3920 OS << ' ' << I->id;
3921 OS << '\n';
3922 }
3923 }
3924}
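The lane-mask rules above, restated standalone: subrange masks must be pairwise disjoint and contained in the register's maximum mask. Lane masks are modeled as plain 64-bit words here, and the function name is invented.

#include <cstdint>
#include <vector>

// Returns true if the subrange masks are pairwise disjoint and covered by
// MaxMask (the register's maximum lane mask).
static bool subRangeMasksOK(const std::vector<uint64_t> &SubMasks,
                            uint64_t MaxMask) {
  uint64_t Seen = 0;
  for (uint64_t M : SubMasks) {
    if (M & Seen)     // a lane may not be claimed by two subranges
      return false;
    if (M & ~MaxMask) // a subrange may not claim lanes the register lacks
      return false;
    Seen |= M;
  }
  return true;
}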
3925
3926namespace {
3927
3928 // FrameSetup and FrameDestroy can have zero adjustment, so a single integer
3929 // cannot tell whether a zero value came from a FrameSetup or from a
3930 // FrameDestroy.
3931 // We use a bool plus an integer to capture the stack state.
3932struct StackStateOfBB {
3933 StackStateOfBB() = default;
3934 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
3935 : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3936 ExitIsSetup(ExitSetup) {}
3937
3938 // Can be negative, which means we are setting up a frame.
3939 int EntryValue = 0;
3940 int ExitValue = 0;
3941 bool EntryIsSetup = false;
3942 bool ExitIsSetup = false;
3943};
3944
3945} // end anonymous namespace
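How the (value, flag) pair is meant to evolve, as a sketch with invented helper names: a FrameSetup of n bytes lowers the running value by n and sets the flag, and the matching FrameDestroy adds n back and clears it, so a well-formed return block ends at (0, false).

// Invented helpers showing how the pair evolves; not part of the verifier.
struct StackState {
  int Value = 0;        // negative while inside a call frame
  bool IsSetup = false; // true between FrameSetup and FrameDestroy
};

static void applyFrameSetup(StackState &S, int Bytes) {
  S.Value -= Bytes;
  S.IsSetup = true;
}

static void applyFrameDestroy(StackState &S, int Bytes) {
  S.Value += Bytes;
  S.IsSetup = false;
}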
3946
3947/// Make sure that on every path through the CFG a FrameSetup <n> is always
3948/// followed by a FrameDestroy <n>, that stack adjustments are identical on all
3949/// CFG edges to a merge point, and that the frame is destroyed at the end of a return block.
3950void MachineVerifier::verifyStackFrame() {
3951 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3952 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3953 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3954 return;
3955
3955
3956 SmallVector<StackStateOfBB, 8> SPState;
3957 SPState.resize(MF->getNumBlockIDs());
3958 df_iterator_default_set<const MachineBasicBlock *> Reachable;
3959
3960 // Visit the MBBs in DFS order.
3961 for (df_ext_iterator<const MachineFunction *,
3962 df_iterator_default_set<const MachineBasicBlock *>>
3963 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3964 DFI != DFE; ++DFI) {
3965 const MachineBasicBlock *MBB = *DFI;
3966
3967 StackStateOfBB BBState;
3968 // Check the exit state of the DFS stack predecessor.
3969 if (DFI.getPathLength() >= 2) {
3970 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3971 assert(Reachable.count(StackPred) &&
3972 "DFS stack predecessor is already visited.\n");
3973 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3974 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3975 BBState.ExitValue = BBState.EntryValue;
3976 BBState.ExitIsSetup = BBState.EntryIsSetup;
3977 }
3978
3979 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3980 report("Call frame size on entry does not match value computed from "
3981 "predecessor",
3982 MBB);
3983 OS << "Call frame size on entry " << MBB->getCallFrameSize()
3984 << " does not match value computed from predecessor "
3985 << -BBState.EntryValue << '\n';
3986 }
3987
3988 // Update stack state by checking contents of MBB.
3989 for (const auto &I : *MBB) {
3990 if (I.getOpcode() == FrameSetupOpcode) {
3991 if (BBState.ExitIsSetup)
3992 report("FrameSetup is after another FrameSetup", &I);
3993 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3994 report("AdjustsStack not set in presence of a frame pseudo "
3995 "instruction.", &I);
3996 BBState.ExitValue -= TII->getFrameTotalSize(I);
3997 BBState.ExitIsSetup = true;
3998 }
3999
4000 if (I.getOpcode() == FrameDestroyOpcode) {
4001 int Size = TII->getFrameTotalSize(I);
4002 if (!BBState.ExitIsSetup)
4003 report("FrameDestroy is not after a FrameSetup", &I);
4004 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
4005 BBState.ExitValue;
4006 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
4007 report("FrameDestroy <n> is after FrameSetup <m>", &I);
4008 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
4009 << AbsSPAdj << ">.\n";
4010 }
4011 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4012 report("AdjustsStack not set in presence of a frame pseudo "
4013 "instruction.", &I);
4014 BBState.ExitValue += Size;
4015 BBState.ExitIsSetup = false;
4016 }
4017 }
4018 SPState[MBB->getNumber()] = BBState;
4019
4020 // Make sure the exit state of any predecessor is consistent with the entry
4021 // state.
4022 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4023 if (Reachable.count(Pred) &&
4024 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4025 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4026 report("The exit stack state of a predecessor is inconsistent.", MBB);
4027 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4028 << SPState[Pred->getNumber()].ExitValue << ", "
4029 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4030 << printMBBReference(*MBB) << " has entry state ("
4031 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4032 }
4033 }
4034
4035 // Make sure the entry state of any successor is consistent with the exit
4036 // state.
4037 for (const MachineBasicBlock *Succ : MBB->successors()) {
4038 if (Reachable.count(Succ) &&
4039 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4040 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4041 report("The entry stack state of a successor is inconsistent.", MBB);
4042 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4043 << SPState[Succ->getNumber()].EntryValue << ", "
4044 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4045 << printMBBReference(*MBB) << " has exit state ("
4046 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4047 }
4048 }
4049
4050 // Make sure a basic block with return ends with zero stack adjustment.
4051 if (!MBB->empty() && MBB->back().isReturn()) {
4052 if (BBState.ExitIsSetup)
4053 report("A return block ends with a FrameSetup.", MBB);
4054 if (BBState.ExitValue)
4055 report("A return block ends with a nonzero stack adjustment.", MBB);
4056 }
4057 }
4058}
4059
4060void MachineVerifier::verifyStackProtector() {
4061 const MachineFrameInfo &MFI = MF->getFrameInfo();
4062 if (!MFI.hasStackProtectorIndex())
4063 return;
4064 // Only applicable when the offsets of frame objects have been determined,
4065 // which is indicated by a non-zero stack size.
4066 if (!MFI.getStackSize())
4067 return;
4068 const TargetFrameLowering &TFI = *MF->getSubtarget().getFrameLowering();
4069 bool StackGrowsDown =
4070 TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
4071 unsigned FI = MFI.getStackProtectorIndex();
4072 int64_t SPStart = MFI.getObjectOffset(FI);
4073 int64_t SPEnd = SPStart + MFI.getObjectSize(FI);
4074 for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
4075 if (I == FI)
4076 continue;
4077 if (MFI.isDeadObjectIndex(I))
4078 continue;
4079 // FIXME: Skip non-default stack objects, as some targets may place them
4080 // above the stack protector. This is a workaround for the fact that
4081 // backends such as AArch64 may place SVE stack objects *above* the stack
4082 // protector.
4083 if (MFI.getStackID(I) != TargetStackID::Default)
4084 continue;
4085 // Skip variable-sized objects because they do not have a fixed offset.
4086 if (MFI.isVariableSizedObjectIndex(I))
4087 continue;
4088 // FIXME: Skip spill slots which may be allocated above the stack protector.
4089 // Ideally this would only skip callee-saved registers, but we don't have
4090 // that information here. For example, spill-slots used for scavenging are
4091 // not described in CalleeSavedInfo.
4092 if (MFI.isSpillSlotObjectIndex(I))
4093 continue;
4094 int64_t ObjStart = MFI.getObjectOffset(I);
4095 int64_t ObjEnd = ObjStart + MFI.getObjectSize(I);
4096 if (SPStart < ObjEnd && ObjStart < SPEnd) {
4097 report("Stack protector overlaps with another stack object", MF);
4098 break;
4099 }
4100 if ((StackGrowsDown && SPStart <= ObjStart) ||
4101 (!StackGrowsDown && SPStart >= ObjStart)) {
4102 report("Stack protector is not the top-most object on the stack", MF);
4103 break;
4104 }
4105 }
4106}
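The overlap test above is the standard half-open interval check; restated standalone with an invented helper:

#include <cstdint>

// Half-open interval overlap: [AStart, AEnd) and [BStart, BEnd) collide
// exactly when each starts before the other ends.
static bool objectsOverlap(int64_t AStart, int64_t AEnd, int64_t BStart,
                           int64_t BEnd) {
  return AStart < BEnd && BStart < AEnd;
}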
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint32_t Index
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
global merge Global merge function pass
const HexagonInstrInfo * TII
hexagon widen Hexagon Store false hexagon widen loads
hexagon widen stores
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
Register Reg
Register const TargetRegisterInfo * TRI
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:56
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
std::unordered_set< BasicBlock * > BlockSet
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1457
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:431
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:690
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:277
const APFloat & getValueAPF() const
Definition: Constants.h:320
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
IntegerType * getIntegerType() const
Variant of the getType() method to always return an IntegerType, which reduces the amount of casting ...
Definition: Constants.h:193
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:157
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition: DenseMap.h:267
Implements a dense probed hash-table based set.
Definition: DenseSet.h:263
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:354
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Definition: DerivedTypes.h:74
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:265
constexpr bool isScalar() const
Definition: LowLevelType.h:147
constexpr bool isPointerVector() const
Definition: LowLevelType.h:153
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:43
constexpr bool isValid() const
Definition: LowLevelType.h:146
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:160
constexpr bool isVector() const
Definition: LowLevelType.h:149
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:171
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:191
constexpr bool isPointer() const
Definition: LowLevelType.h:150
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:278
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:271
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:154
constexpr LLT getScalarType() const
Definition: LowLevelType.h:206
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:201
A live range for subregisters.
Definition: LiveInterval.h:697
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:690
Register reg() const
Definition: LiveInterval.h:721
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:813
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:785
LLVM_ABI void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:91
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:118
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:106
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:124
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:113
LLVM_ABI static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:158
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:318
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:403
LLVM_ABI bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:384
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:545
iterator end()
Definition: LiveInterval.h:217
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
Definition: LiveInterval.h:431
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
Definition: LiveInterval.h:314
iterator begin()
Definition: LiveInterval.h:216
VNInfoList valnos
Definition: LiveInterval.h:205
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:423
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:64
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:633
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:199
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:238
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:240
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:249
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:417
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:420
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:220
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:231
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:86
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:114
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:98
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int getStackProtectorIndex() const
Return the index for the stack protector object.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
LLVM_ABI BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
bool isVariableSizedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a variable sized object.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:72
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:587
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:938
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:974
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:965
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
unsigned getCFIIndex() const
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Definition: ManagedStatic.h:85
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:99
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:140
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:29
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:50
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:46
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:67
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition: Register.h:82
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:78
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:66
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:177
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:210
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:243
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:213
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:217
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:273
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:238
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:220
SlotIndexes pass.
Definition: SlotIndexes.h:298
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:506
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:511
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:482
size_type size() const
Definition: SmallPtrSet.h:99
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:418
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
iterator begin() const
Definition: SmallPtrSet.h:494
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void resize(size_type N)
Definition: SmallVector.h:639
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:159
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:83
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
VNInfo - Value Number Information.
Definition: LiveInterval.h:54
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:82
unsigned id
The ID number of this value.
Definition: LiveInterval.h:59
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:62
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:79
LLVM Value Representation.
Definition: Value.h:75
Wrapper class representing a virtual register or register unit.
Definition: Register.h:176
constexpr bool isVirtualReg() const
Definition: Register.h:187
constexpr MCRegUnit asMCRegUnit() const
Definition: Register.h:191
constexpr Register asVirtualReg() const
Definition: Register.h:196
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:194
constexpr bool isNonZero() const
Definition: TypeSize.h:159
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:219
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:226
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:240
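The isKnown* predicates above compare possibly-scalable quantities conservatively: they return true only when the relation holds for every possible vscale. A small sketch over TypeSize (function names illustrative):

#include "llvm/Support/TypeSize.h"
#include <cstdint>

// Provable strict ordering; false when the relation cannot be shown to
// hold for all vscale values (e.g. one side scalable, the other fixed).
bool provablySmaller(llvm::TypeSize A, llvm::TypeSize B) {
  return llvm::TypeSize::isKnownLT(A, B);
}

std::uint64_t minBits(llvm::TypeSize TS) {
  // The actual size is getKnownMinValue() scaled by vscale when isScalable().
  return TS.getKnownMinValue();
}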
self_iterator getIterator()
Definition: ilist_node.h:134
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
Definition: Intrinsics.cpp:743
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:62
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:61
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:47
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
LLVM_ABI const_iterator begin(StringRef path LLVM_LIFETIME_BOUND, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
LLVM_ABI const_iterator end(StringRef path LLVM_LIFETIME_BOUND)
Get end iterator over path.
Definition: Path.cpp:235
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
@ Offset
Definition: DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1702
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
LLVM_ABI Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2155
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
LLVM_ABI void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
LLVM_ABI void initializeMachineVerifierLegacyPassPass(PassRegistry &)
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:43
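set_union above and set_subtract (a few entries earlier) implement plain set algebra over any set-like container; a sketch on DenseSet:

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetOperations.h"

void setAlgebraSketch(llvm::DenseSet<unsigned> &A,
                      const llvm::DenseSet<unsigned> &B) {
  bool Changed = llvm::set_union(A, B); // A := A u B; true if A grew
  (void)Changed;
  llvm::set_subtract(A, B); // A := A - B
}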
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
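all_of, none_of, and is_contained above are range wrappers that save spelling out begin/end pairs; a self-contained sketch:

#include "llvm/ADT/STLExtras.h"
#include <vector>

bool rangeHelperSketch(const std::vector<int> &V) {
  bool AllPositive = llvm::all_of(V, [](int X) { return X > 0; });
  bool NoneNegative = llvm::none_of(V, [](int X) { return X < 0; });
  // Linear search; true if 7 appears anywhere in V.
  return AllPositive && NoneNegative && llvm::is_contained(V, 7);
}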
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
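printReg and printMBBReference return Printable objects that stream directly into a raw_ostream, which is how verifier-style diagnostics are usually assembled; the message text below is illustrative:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"

void reportBadUse(llvm::Register Reg, const llvm::MachineBasicBlock &MBB,
                  const llvm::TargetRegisterInfo *TRI) {
  // TRI may be null; printReg then falls back to raw register numbers.
  llvm::errs() << "Bad use of " << llvm::printReg(Reg, TRI) << " in "
               << llvm::printMBBReference(MBB) << '\n';
}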
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:851
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:388
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
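The LaneBitmask helpers above compose with ordinary bit operations and with PrintLaneMask from earlier in this list; a sketch of a typical lane-coverage check (the masks are illustrative):

#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/raw_ostream.h"

void laneCoverageSketch(llvm::LaneBitmask Live, llvm::LaneBitmask Used) {
  llvm::LaneBitmask All = llvm::LaneBitmask::getAll();   // every lane set
  llvm::LaneBitmask None = llvm::LaneBitmask::getNone(); // empty mask
  (void)All;
  (void)None;
  // Lanes that are used but not covered by the live mask.
  llvm::LaneBitmask Missing = Used & ~Live;
  if (Missing.any()) // at least one uncovered lane
    llvm::errs() << "missing lanes " << llvm::PrintLaneMask(Missing) << '\n';
}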
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:163
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:79
Pair of physical register and lane mask.