1//===-- Verifier.cpp - Implement the Module Verifier ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify, for example, that shifts & logicals only happen on integrals.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
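//
// The public entry points that drive this file are declared in
// llvm/IR/Verifier.h. A minimal usage sketch (illustrative only, not taken
// from this file; both functions return true when the IR is broken):
//
//   bool BrokenDebugInfo = false;
//   if (verifyModule(M, &errs(), &BrokenDebugInfo))
//     report_fatal_error("module failed the verifier");
//   if (verifyFunction(F, &errs()))
//     report_fatal_error("function failed the verifier");
//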
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
123#include <algorithm>
124#include <cassert>
125#include <cstdint>
126#include <memory>
127#include <optional>
128#include <string>
129#include <utility>
130
131using namespace llvm;
132
134 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
135 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
136 "scopes are not dominating"));
137
138namespace llvm {
139
140struct VerifierSupport {
141 raw_ostream *OS;
142 const Module &M;
143 ModuleSlotTracker MST;
144 const Triple &TT;
145 const DataLayout &DL;
146 LLVMContext &Context;
147
148 /// Track the brokenness of the module while recursively visiting.
149 bool Broken = false;
150 /// Broken debug info can be "recovered" from by stripping the debug info.
151 bool BrokenDebugInfo = false;
152 /// Whether to treat broken debug info as an error.
153 bool TreatBrokenDebugInfoAsError = true;
154
155 explicit VerifierSupport(raw_ostream *OS, const Module &M)
156 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
157 Context(M.getContext()) {}
158
159private:
160 void Write(const Module *M) {
161 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
162 }
163
164 void Write(const Value *V) {
165 if (V)
166 Write(*V);
167 }
168
169 void Write(const Value &V) {
170 if (isa<Instruction>(V)) {
171 V.print(*OS, MST);
172 *OS << '\n';
173 } else {
174 V.printAsOperand(*OS, true, MST);
175 *OS << '\n';
176 }
177 }
178
179 void Write(const DbgRecord *DR) {
180 if (DR) {
181 DR->print(*OS, MST, false);
182 *OS << '\n';
183 }
184 }
185
187 switch (Type) {
189 *OS << "value";
190 break;
192 *OS << "declare";
193 break;
195 *OS << "assign";
196 break;
198 *OS << "end";
199 break;
201 *OS << "any";
202 break;
203 };
204 }
205
206 void Write(const Metadata *MD) {
207 if (!MD)
208 return;
209 MD->print(*OS, MST, &M);
210 *OS << '\n';
211 }
212
213 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
214 Write(MD.get());
215 }
216
217 void Write(const NamedMDNode *NMD) {
218 if (!NMD)
219 return;
220 NMD->print(*OS, MST);
221 *OS << '\n';
222 }
223
224 void Write(Type *T) {
225 if (!T)
226 return;
227 *OS << ' ' << *T;
228 }
229
230 void Write(const Comdat *C) {
231 if (!C)
232 return;
233 *OS << *C;
234 }
235
236 void Write(const APInt *AI) {
237 if (!AI)
238 return;
239 *OS << *AI << '\n';
240 }
241
242 void Write(const unsigned i) { *OS << i << '\n'; }
243
244 // NOLINTNEXTLINE(readability-identifier-naming)
245 void Write(const Attribute *A) {
246 if (!A)
247 return;
248 *OS << A->getAsString() << '\n';
249 }
250
251 // NOLINTNEXTLINE(readability-identifier-naming)
252 void Write(const AttributeSet *AS) {
253 if (!AS)
254 return;
255 *OS << AS->getAsString() << '\n';
256 }
257
258 // NOLINTNEXTLINE(readability-identifier-naming)
259 void Write(const AttributeList *AL) {
260 if (!AL)
261 return;
262 AL->print(*OS);
263 }
264
265 void Write(Printable P) { *OS << P << '\n'; }
266
267 template <typename T> void Write(ArrayRef<T> Vs) {
268 for (const T &V : Vs)
269 Write(V);
270 }
271
272 template <typename T1, typename... Ts>
273 void WriteTs(const T1 &V1, const Ts &... Vs) {
274 Write(V1);
275 WriteTs(Vs...);
276 }
277
278 template <typename... Ts> void WriteTs() {}
279
280public:
281 /// A check failed, so print out the condition and the message.
282 ///
283 /// This provides a nice place to put a breakpoint if you want to see why
284 /// something is not correct.
285 void CheckFailed(const Twine &Message) {
286 if (OS)
287 *OS << Message << '\n';
288 Broken = true;
289 }
290
291 /// A check failed (with values to print).
292 ///
293 /// This calls the Message-only version so that the above is easier to set a
294 /// breakpoint on.
295 template <typename T1, typename... Ts>
296 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
297 CheckFailed(Message);
298 if (OS)
299 WriteTs(V1, Vs...);
300 }
301
302 /// A debug info check failed.
303 void DebugInfoCheckFailed(const Twine &Message) {
304 if (OS)
305 *OS << Message << '\n';
307 BrokenDebugInfo = true;
308 }
309
310 /// A debug info check failed (with values to print).
311 template <typename T1, typename... Ts>
312 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
313 const Ts &... Vs) {
314 DebugInfoCheckFailed(Message);
315 if (OS)
316 WriteTs(V1, Vs...);
317 }
318};
319
320} // namespace llvm
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
336 SmallPtrSet<const Metadata *, 32> MDNodes;
337
338 /// Keep track which DISubprogram is attached to which function.
339 DenseMap<const Function *, const DISubprogram *> DISubprogramAttachments;
340
341 /// Track all DICompileUnits visited.
342 SmallPtrSet<const Metadata *, 2> CUVisited;
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
356 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
360 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
382 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 assert(F.getParent() == &M &&
403 "An instance of this class only works with a specific module!");
404
405 // First ensure the function is well-enough formed to compute dominance
406 // information, and directly compute a dominance tree. We don't rely on the
407 // pass manager to provide this as it isolates us from a potentially
408 // out-of-date dominator tree and makes it significantly more complex to run
409 // this code outside of a pass manager.
410 // FIXME: It's really gross that we have to cast away constness here.
411 if (!F.empty())
412 DT.recalculate(const_cast<Function &>(F));
413
414 for (const BasicBlock &BB : F) {
415 if (!BB.empty() && BB.back().isTerminator())
416 continue;
417
418 if (OS) {
419 *OS << "Basic Block in function '" << F.getName()
420 << "' does not have terminator!\n";
421 BB.printAsOperand(*OS, true, MST);
422 *OS << "\n";
423 }
424 return false;
425 }
426
427 auto FailureCB = [this](const Twine &Message) {
428 this->CheckFailed(Message);
429 };
430 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
431
432 Broken = false;
433 // FIXME: We strip const here because the inst visitor strips const.
434 visit(const_cast<Function &>(F));
435 verifySiblingFuncletUnwinds();
436
437 if (ConvergenceVerifyHelper.sawTokens())
438 ConvergenceVerifyHelper.verify(DT);
439
440 InstsInThisBlock.clear();
441 DebugFnArgs.clear();
442 LandingPadResultTy = nullptr;
443 SawFrameEscape = false;
444 SiblingFuncletInfo.clear();
445 verifyNoAliasScopeDecl();
446 NoAliasScopeDecls.clear();
447
448 return !Broken;
449 }
450
451 /// Verify the module that this instance of \c Verifier was initialized with.
452 bool verify() {
453 Broken = false;
454
455 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
456 for (const Function &F : M)
457 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
458 DeoptimizeDeclarations.push_back(&F);
459
460 // Now that we've visited every function, verify that we never asked to
461 // recover a frame index that wasn't escaped.
462 verifyFrameRecoverIndices();
463 for (const GlobalVariable &GV : M.globals())
464 visitGlobalVariable(GV);
465
466 for (const GlobalAlias &GA : M.aliases())
467 visitGlobalAlias(GA);
468
469 for (const GlobalIFunc &GI : M.ifuncs())
470 visitGlobalIFunc(GI);
471
472 for (const NamedMDNode &NMD : M.named_metadata())
473 visitNamedMDNode(NMD);
474
475 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
476 visitComdat(SMEC.getValue());
477
478 visitModuleFlags();
479 visitModuleIdents();
480 visitModuleCommandLines();
481
482 verifyCompileUnits();
483
484 verifyDeoptimizeCallingConvs();
485 DISubprogramAttachments.clear();
486 return !Broken;
487 }
488
489private:
490 /// Whether a metadata node is allowed to be, or contain, a DILocation.
491 enum class AreDebugLocsAllowed { No, Yes };
492
493 /// Metadata that should be treated as a range, with slightly different
494 /// requirements.
495 enum class RangeLikeMetadataKind {
496 Range, // MD_range
497 AbsoluteSymbol, // MD_absolute_symbol
498 NoaliasAddrspace // MD_noalias_addrspace
499 };
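 // For reference (per the LangRef, not restated elsewhere in this file):
 // range-style metadata is a list of constant [Low, High) pairs, e.g.
 // !{i32 0, i32 10, i32 20, i32 30}; the variants above apply the same
 // shape to !absolute_symbol and !noalias.addrspace attachments.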
500
501 // Verification methods...
502 void visitGlobalValue(const GlobalValue &GV);
503 void visitGlobalVariable(const GlobalVariable &GV);
504 void visitGlobalAlias(const GlobalAlias &GA);
505 void visitGlobalIFunc(const GlobalIFunc &GI);
506 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
507 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
508 const GlobalAlias &A, const Constant &C);
509 void visitNamedMDNode(const NamedMDNode &NMD);
510 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
511 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
512 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
513 void visitDIArgList(const DIArgList &AL, Function *F);
514 void visitComdat(const Comdat &C);
515 void visitModuleIdents();
516 void visitModuleCommandLines();
517 void visitModuleFlags();
518 void visitModuleFlag(const MDNode *Op,
519 DenseMap<const MDString *, const MDNode *> &SeenIDs,
520 SmallVectorImpl<const MDNode *> &Requirements);
521 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
522 void visitFunction(const Function &F);
523 void visitBasicBlock(BasicBlock &BB);
524 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
525 RangeLikeMetadataKind Kind);
526 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
527 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitNofreeMetadata(Instruction &I, MDNode *MD);
530 void visitProfMetadata(Instruction &I, MDNode *MD);
531 void visitCallStackMetadata(MDNode *MD);
532 void visitMemProfMetadata(Instruction &I, MDNode *MD);
533 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
534 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
535 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
536 void visitMMRAMetadata(Instruction &I, MDNode *MD);
537 void visitAnnotationMetadata(MDNode *Annotation);
538 void visitAliasScopeMetadata(const MDNode *MD);
539 void visitAliasScopeListMetadata(const MDNode *MD);
540 void visitAccessGroupMetadata(const MDNode *MD);
541
542 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
543#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
544#include "llvm/IR/Metadata.def"
545 void visitDIScope(const DIScope &N);
546 void visitDIVariable(const DIVariable &N);
547 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
548 void visitDITemplateParameter(const DITemplateParameter &N);
549
550 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
551
552 void visit(DbgLabelRecord &DLR);
553 void visit(DbgVariableRecord &DVR);
554 // InstVisitor overrides...
555 using InstVisitor<Verifier>::visit;
556 void visitDbgRecords(Instruction &I);
557 void visit(Instruction &I);
558
559 void visitTruncInst(TruncInst &I);
560 void visitZExtInst(ZExtInst &I);
561 void visitSExtInst(SExtInst &I);
562 void visitFPTruncInst(FPTruncInst &I);
563 void visitFPExtInst(FPExtInst &I);
564 void visitFPToUIInst(FPToUIInst &I);
565 void visitFPToSIInst(FPToSIInst &I);
566 void visitUIToFPInst(UIToFPInst &I);
567 void visitSIToFPInst(SIToFPInst &I);
568 void visitIntToPtrInst(IntToPtrInst &I);
569 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
570 void visitPtrToAddrInst(PtrToAddrInst &I);
571 void visitPtrToIntInst(PtrToIntInst &I);
572 void visitBitCastInst(BitCastInst &I);
573 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
574 void visitPHINode(PHINode &PN);
575 void visitCallBase(CallBase &Call);
576 void visitUnaryOperator(UnaryOperator &U);
577 void visitBinaryOperator(BinaryOperator &B);
578 void visitICmpInst(ICmpInst &IC);
579 void visitFCmpInst(FCmpInst &FC);
580 void visitExtractElementInst(ExtractElementInst &EI);
581 void visitInsertElementInst(InsertElementInst &EI);
582 void visitShuffleVectorInst(ShuffleVectorInst &EI);
583 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
584 void visitCallInst(CallInst &CI);
585 void visitInvokeInst(InvokeInst &II);
586 void visitGetElementPtrInst(GetElementPtrInst &GEP);
587 void visitLoadInst(LoadInst &LI);
588 void visitStoreInst(StoreInst &SI);
589 void verifyDominatesUse(Instruction &I, unsigned i);
590 void visitInstruction(Instruction &I);
591 void visitTerminator(Instruction &I);
592 void visitBranchInst(BranchInst &BI);
593 void visitReturnInst(ReturnInst &RI);
594 void visitSwitchInst(SwitchInst &SI);
595 void visitIndirectBrInst(IndirectBrInst &BI);
596 void visitCallBrInst(CallBrInst &CBI);
597 void visitSelectInst(SelectInst &SI);
598 void visitUserOp1(Instruction &I);
599 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
600 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
601 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
602 void visitVPIntrinsic(VPIntrinsic &VPI);
603 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
604 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
605 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
606 void visitFenceInst(FenceInst &FI);
607 void visitAllocaInst(AllocaInst &AI);
608 void visitExtractValueInst(ExtractValueInst &EVI);
609 void visitInsertValueInst(InsertValueInst &IVI);
610 void visitEHPadPredecessors(Instruction &I);
611 void visitLandingPadInst(LandingPadInst &LPI);
612 void visitResumeInst(ResumeInst &RI);
613 void visitCatchPadInst(CatchPadInst &CPI);
614 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
615 void visitCleanupPadInst(CleanupPadInst &CPI);
616 void visitFuncletPadInst(FuncletPadInst &FPI);
617 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
618 void visitCleanupReturnInst(CleanupReturnInst &CRI);
619
620 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
621 void verifySwiftErrorValue(const Value *SwiftErrorVal);
622 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
623 void verifyMustTailCall(CallInst &CI);
624 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
625 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
626 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
627 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
628 const Value *V);
629 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
630 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
631 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
632
633 void visitConstantExprsRecursively(const Constant *EntryC);
634 void visitConstantExpr(const ConstantExpr *CE);
635 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
636 void verifyInlineAsmCall(const CallBase &Call);
637 void verifyStatepoint(const CallBase &Call);
638 void verifyFrameRecoverIndices();
639 void verifySiblingFuncletUnwinds();
640
641 void verifyFragmentExpression(const DbgVariableRecord &I);
642 template <typename ValueOrMetadata>
643 void verifyFragmentExpression(const DIVariable &V,
644 DIExpression::FragmentInfo Fragment,
645 ValueOrMetadata *Desc);
646 void verifyFnArgs(const DbgVariableRecord &DVR);
647 void verifyNotEntryValue(const DbgVariableRecord &I);
648
649 /// Module-level debug info verification...
650 void verifyCompileUnits();
651
652 /// Module-level verification that all @llvm.experimental.deoptimize
653 /// declarations share the same calling convention.
654 void verifyDeoptimizeCallingConvs();
655
656 void verifyAttachedCallBundle(const CallBase &Call,
657 const OperandBundleUse &BU);
658
659 /// Verify the llvm.experimental.noalias.scope.decl declarations
660 void verifyNoAliasScopeDecl();
661};
662
663} // end anonymous namespace
664
665/// We know that cond should be true, if not print an error message.
666#define Check(C, ...) \
667 do { \
668 if (!(C)) { \
669 CheckFailed(__VA_ARGS__); \
670 return; \
671 } \
672 } while (false)
673
674/// We know that a debug info condition should be true, if not print
675/// an error message.
676#define CheckDI(C, ...) \
677 do { \
678 if (!(C)) { \
679 DebugInfoCheckFailed(__VA_ARGS__); \
680 return; \
681 } \
682 } while (false)
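// Illustrative uses of these macros (patterns of this shape appear throughout
// the visitors below; the operands following the message are forwarded to
// CheckFailed / DebugInfoCheckFailed and printed via the Write overloads in
// VerifierSupport):
//
//   Check(I.getOperand(i) != nullptr, "Operand is null", &I);
//   CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);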
683
684void Verifier::visitDbgRecords(Instruction &I) {
685 if (!I.DebugMarker)
686 return;
687 CheckDI(I.DebugMarker->MarkedInstr == &I,
688 "Instruction has invalid DebugMarker", &I);
689 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
690 "PHI Node must not have any attached DbgRecords", &I);
691 for (DbgRecord &DR : I.getDbgRecordRange()) {
692 CheckDI(DR.getMarker() == I.DebugMarker,
693 "DbgRecord had invalid DebugMarker", &I, &DR);
694 if (auto *Loc =
695 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
696 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
697 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
698 visit(*DVR);
699 // These have to appear after `visit` for consistency with existing
700 // intrinsic behaviour.
701 verifyFragmentExpression(*DVR);
702 verifyNotEntryValue(*DVR);
703 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
704 visit(*DLR);
705 }
706 }
707}
708
709void Verifier::visit(Instruction &I) {
710 visitDbgRecords(I);
711 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
712 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
713 InstVisitor<Verifier>::visit(I);
714}
715
716// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
717static void forEachUser(const Value *User,
718 SmallPtrSetImpl<const Value *> &Visited,
719 llvm::function_ref<bool(const Value *)> Callback) {
720 if (!Visited.insert(User).second)
721 return;
722
723 SmallVector<const Value *> WorkList(User->materialized_users());
724 while (!WorkList.empty()) {
725 const Value *Cur = WorkList.pop_back_val();
726 if (!Visited.insert(Cur).second)
727 continue;
728 if (Callback(Cur))
729 append_range(WorkList, Cur->materialized_users());
730 }
731}
732
733void Verifier::visitGlobalValue(const GlobalValue &GV) {
735 "Global is external, but doesn't have external or weak linkage!", &GV);
736
737 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
738 if (const MDNode *Associated =
739 GO->getMetadata(LLVMContext::MD_associated)) {
740 Check(Associated->getNumOperands() == 1,
741 "associated metadata must have one operand", &GV, Associated);
742 const Metadata *Op = Associated->getOperand(0).get();
743 Check(Op, "associated metadata must have a global value", GO, Associated);
744
745 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
746 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
747 if (VM) {
748 Check(isa<PointerType>(VM->getValue()->getType()),
749 "associated value must be pointer typed", GV, Associated);
750
751 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
752 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
753 "associated metadata must point to a GlobalObject", GO, Stripped);
754 Check(Stripped != GO,
755 "global values should not associate to themselves", GO,
756 Associated);
757 }
758 }
759
760 // FIXME: Why is getMetadata on GlobalValue protected?
761 if (const MDNode *AbsoluteSymbol =
762 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
763 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
764 DL.getIntPtrType(GO->getType()),
765 RangeLikeMetadataKind::AbsoluteSymbol);
766 }
767 }
768
769 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
770 "Only global variables can have appending linkage!", &GV);
771
772 if (GV.hasAppendingLinkage()) {
773 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
774 Check(GVar && GVar->getValueType()->isArrayTy(),
775 "Only global arrays can have appending linkage!", GVar);
776 }
777
778 if (GV.isDeclarationForLinker())
779 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
780
781 if (GV.hasDLLExportStorageClass()) {
783 "dllexport GlobalValue must have default or protected visibility",
784 &GV);
785 }
786 if (GV.hasDLLImportStorageClass()) {
788 "dllimport GlobalValue must have default visibility", &GV);
789 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
790 &GV);
791
792 Check((GV.isDeclaration() &&
795 "Global is marked as dllimport, but not external", &GV);
796 }
797
798 if (GV.isImplicitDSOLocal())
799 Check(GV.isDSOLocal(),
800 "GlobalValue with local linkage or non-default "
801 "visibility must be dso_local!",
802 &GV);
803
804 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
805 if (const Instruction *I = dyn_cast<Instruction>(V)) {
806 if (!I->getParent() || !I->getParent()->getParent())
807 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
808 I);
809 else if (I->getParent()->getParent()->getParent() != &M)
810 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
811 I->getParent()->getParent(),
812 I->getParent()->getParent()->getParent());
813 return false;
814 } else if (const Function *F = dyn_cast<Function>(V)) {
815 if (F->getParent() != &M)
816 CheckFailed("Global is used by function in a different module", &GV, &M,
817 F, F->getParent());
818 return false;
819 }
820 return true;
821 });
822}
823
824void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
825 Type *GVType = GV.getValueType();
826
827 if (MaybeAlign A = GV.getAlign()) {
828 Check(A->value() <= Value::MaximumAlignment,
829 "huge alignment values are unsupported", &GV);
830 }
831
832 if (GV.hasInitializer()) {
833 Check(GV.getInitializer()->getType() == GVType,
834 "Global variable initializer type does not match global "
835 "variable type!",
836 &GV);
838 "Global variable initializer must be sized", &GV);
839 visitConstantExprsRecursively(GV.getInitializer());
840 // If the global has common linkage, it must have a zero initializer and
841 // cannot be constant.
842 if (GV.hasCommonLinkage()) {
844 "'common' global must have a zero initializer!", &GV);
845 Check(!GV.isConstant(), "'common' global may not be marked constant!",
846 &GV);
847 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
848 }
849 }
850
851 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
852 GV.getName() == "llvm.global_dtors")) {
854 "invalid linkage for intrinsic global variable", &GV);
856 "invalid uses of intrinsic global variable", &GV);
857
858 // Don't worry about emitting an error for it not being an array,
859 // visitGlobalValue will complain on appending non-array.
860 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
861 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
862 PointerType *FuncPtrTy =
863 PointerType::get(Context, DL.getProgramAddressSpace());
864 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
865 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
866 STy->getTypeAtIndex(1) == FuncPtrTy,
867 "wrong type for intrinsic global variable", &GV);
868 Check(STy->getNumElements() == 3,
869 "the third field of the element type is mandatory, "
870 "specify ptr null to migrate from the obsoleted 2-field form");
871 Type *ETy = STy->getTypeAtIndex(2);
872 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
873 &GV);
874 }
875 }
876
877 if (GV.hasName() && (GV.getName() == "llvm.used" ||
878 GV.getName() == "llvm.compiler.used")) {
880 "invalid linkage for intrinsic global variable", &GV);
882 "invalid uses of intrinsic global variable", &GV);
883
884 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
885 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
886 Check(PTy, "wrong type for intrinsic global variable", &GV);
887 if (GV.hasInitializer()) {
888 const Constant *Init = GV.getInitializer();
889 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
890 Check(InitArray, "wrong initializer for intrinsic global variable",
891 Init);
892 for (Value *Op : InitArray->operands()) {
893 Value *V = Op->stripPointerCasts();
894 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
895 isa<GlobalAlias>(V),
896 Twine("invalid ") + GV.getName() + " member", V);
897 Check(V->hasName(),
898 Twine("members of ") + GV.getName() + " must be named", V);
899 }
900 }
901 }
902 }
903
904 // Visit any debug info attachments.
905 SmallVector<MDNode *, 1> MDs;
906 GV.getMetadata(LLVMContext::MD_dbg, MDs);
907 for (auto *MD : MDs) {
908 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
909 visitDIGlobalVariableExpression(*GVE);
910 else
911 CheckDI(false, "!dbg attachment of global variable must be a "
912 "DIGlobalVariableExpression");
913 }
914
915 // Scalable vectors cannot be global variables, since we don't know
916 // the runtime size.
917 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
918
919 // Check if it is or contains a target extension type that disallows being
920 // used as a global.
922 "Global @" + GV.getName() + " has illegal target extension type",
923 GVType);
924
925 if (!GV.hasInitializer()) {
926 visitGlobalValue(GV);
927 return;
928 }
929
930 // Walk any aggregate initializers looking for bitcasts between address spaces
931 visitConstantExprsRecursively(GV.getInitializer());
932
933 visitGlobalValue(GV);
934}
935
936void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
937 SmallPtrSet<const GlobalAlias *, 4> Visited;
938 Visited.insert(&GA);
939 visitAliaseeSubExpr(Visited, GA, C);
940}
941
942void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
943 const GlobalAlias &GA, const Constant &C) {
944 if (GA.hasAvailableExternallyLinkage()) {
945 Check(isa<GlobalValue>(C) &&
946 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
947 "available_externally alias must point to available_externally "
948 "global value",
949 &GA);
950 }
951 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
953 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
954 &GA);
955 }
956
957 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
958 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
959
960 Check(!GA2->isInterposable(),
961 "Alias cannot point to an interposable alias", &GA);
962 } else {
963 // Only continue verifying subexpressions of GlobalAliases.
964 // Do not recurse into global initializers.
965 return;
966 }
967 }
968
969 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
970 visitConstantExprsRecursively(CE);
971
972 for (const Use &U : C.operands()) {
973 Value *V = &*U;
974 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
975 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
976 else if (const auto *C2 = dyn_cast<Constant>(V))
977 visitAliaseeSubExpr(Visited, GA, *C2);
978 }
979}
980
981void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
983 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
984 "weak_odr, external, or available_externally linkage!",
985 &GA);
986 const Constant *Aliasee = GA.getAliasee();
987 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
988 Check(GA.getType() == Aliasee->getType(),
989 "Alias and aliasee types should match!", &GA);
990
991 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
992 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
993
994 visitAliaseeSubExpr(GA, *Aliasee);
995
996 visitGlobalValue(GA);
997}
998
999void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1001 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1002 "weak_odr, or external linkage!",
1003 &GI);
1004 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1005 // is a Function definition.
1006 const auto *Resolver = dyn_cast<Function>(GI.getResolver()->stripPointerCastsAndAliases());
1007 Check(Resolver, "IFunc must have a Function resolver", &GI);
1008 Check(!Resolver->isDeclarationForLinker(),
1009 "IFunc resolver must be a definition", &GI);
1010
1011 // Check that the immediate resolver operand (prior to any bitcasts) has the
1012 // correct type.
1013 const Type *ResolverTy = GI.getResolver()->getType();
1014
1015 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1016 "IFunc resolver must return a pointer", &GI);
1017
1018 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1019 "IFunc resolver has incorrect type", &GI);
1020}
1021
1022void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1023 // There used to be various other llvm.dbg.* nodes, but we don't support
1024 // upgrading them and we want to reserve the namespace for future uses.
1025 if (NMD.getName().starts_with("llvm.dbg."))
1026 CheckDI(NMD.getName() == "llvm.dbg.cu",
1027 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1028 for (const MDNode *MD : NMD.operands()) {
1029 if (NMD.getName() == "llvm.dbg.cu")
1030 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1031
1032 if (!MD)
1033 continue;
1034
1035 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1036 }
1037}
1038
1039void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1040 // Only visit each node once. Metadata can be mutually recursive, so this
1041 // avoids infinite recursion here, as well as being an optimization.
1042 if (!MDNodes.insert(&MD).second)
1043 return;
1044
1045 Check(&MD.getContext() == &Context,
1046 "MDNode context does not match Module context!", &MD);
1047
1048 switch (MD.getMetadataID()) {
1049 default:
1050 llvm_unreachable("Invalid MDNode subclass");
1051 case Metadata::MDTupleKind:
1052 break;
1053#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1054 case Metadata::CLASS##Kind: \
1055 visit##CLASS(cast<CLASS>(MD)); \
1056 break;
1057#include "llvm/IR/Metadata.def"
1058 }
1059
1060 for (const Metadata *Op : MD.operands()) {
1061 if (!Op)
1062 continue;
1063 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1064 &MD, Op);
1065 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1066 "DILocation not allowed within this metadata node", &MD, Op);
1067 if (auto *N = dyn_cast<MDNode>(Op)) {
1068 visitMDNode(*N, AllowLocs);
1069 continue;
1070 }
1071 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1072 visitValueAsMetadata(*V, nullptr);
1073 continue;
1074 }
1075 }
1076
1077 // Check these last, so we diagnose problems in operands first.
1078 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1079 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1080}
1081
1082void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1083 Check(MD.getValue(), "Expected valid value", &MD);
1084 Check(!MD.getValue()->getType()->isMetadataTy(),
1085 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1086
1087 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1088 if (!L)
1089 return;
1090
1091 Check(F, "function-local metadata used outside a function", L);
1092
1093 // If this was an instruction, bb, or argument, verify that it is in the
1094 // function that we expect.
1095 Function *ActualF = nullptr;
1096 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1097 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1098 ActualF = I->getParent()->getParent();
1099 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1100 ActualF = BB->getParent();
1101 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1102 ActualF = A->getParent();
1103 assert(ActualF && "Unimplemented function local metadata case!");
1104
1105 Check(ActualF == F, "function-local metadata used in wrong function", L);
1106}
1107
1108void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1109 for (const ValueAsMetadata *VAM : AL.getArgs())
1110 visitValueAsMetadata(*VAM, F);
1111}
1112
1113void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1114 Metadata *MD = MDV.getMetadata();
1115 if (auto *N = dyn_cast<MDNode>(MD)) {
1116 visitMDNode(*N, AreDebugLocsAllowed::No);
1117 return;
1118 }
1119
1120 // Only visit each node once. Metadata can be mutually recursive, so this
1121 // avoids infinite recursion here, as well as being an optimization.
1122 if (!MDNodes.insert(MD).second)
1123 return;
1124
1125 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1126 visitValueAsMetadata(*V, F);
1127
1128 if (auto *AL = dyn_cast<DIArgList>(MD))
1129 visitDIArgList(*AL, F);
1130}
1131
1132static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1133static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1134static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1135
1136void Verifier::visitDILocation(const DILocation &N) {
1137 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1138 "location requires a valid scope", &N, N.getRawScope());
1139 if (auto *IA = N.getRawInlinedAt())
1140 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1141 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1142 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1143}
1144
1145void Verifier::visitGenericDINode(const GenericDINode &N) {
1146 CheckDI(N.getTag(), "invalid tag", &N);
1147}
1148
1149void Verifier::visitDIScope(const DIScope &N) {
1150 if (auto *F = N.getRawFile())
1151 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1152}
1153
1154void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1155 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1156 auto *BaseType = N.getRawBaseType();
1157 CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1158 auto *LBound = N.getRawLowerBound();
1159 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1160 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1161 "LowerBound must be signed constant or DIVariable or DIExpression",
1162 &N);
1163 auto *UBound = N.getRawUpperBound();
1164 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1165 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1166 "UpperBound must be signed constant or DIVariable or DIExpression",
1167 &N);
1168 auto *Stride = N.getRawStride();
1169 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1170 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1171 "Stride must be signed constant or DIVariable or DIExpression", &N);
1172 auto *Bias = N.getRawBias();
1173 CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1174 isa<DIExpression>(Bias),
1175 "Bias must be signed constant or DIVariable or DIExpression", &N);
1176 // Subrange types currently only support constant size.
1177 auto *Size = N.getRawSizeInBits();
1178 CheckDI(!Size || isa<ConstantAsMetadata>(Size),
1179 "SizeInBits must be a constant");
1180}
1181
1182void Verifier::visitDISubrange(const DISubrange &N) {
1183 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1184 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1185 "Subrange can have any one of count or upperBound", &N);
1186 auto *CBound = N.getRawCountNode();
1187 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1188 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1189 "Count must be signed constant or DIVariable or DIExpression", &N);
1190 auto Count = N.getCount();
1191 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1192 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1193 "invalid subrange count", &N);
1194 auto *LBound = N.getRawLowerBound();
1195 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1196 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1197 "LowerBound must be signed constant or DIVariable or DIExpression",
1198 &N);
1199 auto *UBound = N.getRawUpperBound();
1200 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1201 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1202 "UpperBound must be signed constant or DIVariable or DIExpression",
1203 &N);
1204 auto *Stride = N.getRawStride();
1205 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1206 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1207 "Stride must be signed constant or DIVariable or DIExpression", &N);
1208}
1209
1210void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1211 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1212 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1213 "GenericSubrange can have any one of count or upperBound", &N);
1214 auto *CBound = N.getRawCountNode();
1215 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1216 "Count must be signed constant or DIVariable or DIExpression", &N);
1217 auto *LBound = N.getRawLowerBound();
1218 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1219 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1220 "LowerBound must be signed constant or DIVariable or DIExpression",
1221 &N);
1222 auto *UBound = N.getRawUpperBound();
1223 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1224 "UpperBound must be signed constant or DIVariable or DIExpression",
1225 &N);
1226 auto *Stride = N.getRawStride();
1227 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1228 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1229 "Stride must be signed constant or DIVariable or DIExpression", &N);
1230}
1231
1232void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1233 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1234}
1235
1236void Verifier::visitDIBasicType(const DIBasicType &N) {
1237 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1238 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1239 N.getTag() == dwarf::DW_TAG_string_type,
1240 "invalid tag", &N);
1241 // Basic types currently only support constant size.
1242 auto *Size = N.getRawSizeInBits();
1243 CheckDI(!Size || isa<ConstantAsMetadata>(Size),
1244 "SizeInBits must be a constant");
1245}
1246
1247void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1248 visitDIBasicType(N);
1249
1250 CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1251 CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1252 N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1253 "invalid encoding", &N);
1257 "invalid kind", &N);
1259 N.getFactorRaw() == 0,
1260 "factor should be 0 for rationals", &N);
1262 (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1263 "numerator and denominator should be 0 for non-rationals", &N);
1264}
1265
1266void Verifier::visitDIStringType(const DIStringType &N) {
1267 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1268 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1269 &N);
1270}
1271
1272void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1273 // Common scope checks.
1274 visitDIScope(N);
1275
1276 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1277 N.getTag() == dwarf::DW_TAG_pointer_type ||
1278 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1279 N.getTag() == dwarf::DW_TAG_reference_type ||
1280 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1281 N.getTag() == dwarf::DW_TAG_const_type ||
1282 N.getTag() == dwarf::DW_TAG_immutable_type ||
1283 N.getTag() == dwarf::DW_TAG_volatile_type ||
1284 N.getTag() == dwarf::DW_TAG_restrict_type ||
1285 N.getTag() == dwarf::DW_TAG_atomic_type ||
1286 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1287 N.getTag() == dwarf::DW_TAG_member ||
1288 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1289 N.getTag() == dwarf::DW_TAG_inheritance ||
1290 N.getTag() == dwarf::DW_TAG_friend ||
1291 N.getTag() == dwarf::DW_TAG_set_type ||
1292 N.getTag() == dwarf::DW_TAG_template_alias,
1293 "invalid tag", &N);
1294 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1295 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1296 N.getRawExtraData());
1297 }
1298
1299 if (N.getTag() == dwarf::DW_TAG_set_type) {
1300 if (auto *T = N.getRawBaseType()) {
1301 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1302 auto *Subrange = dyn_cast_or_null<DISubrangeType>(T);
1303 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1304 CheckDI(
1305 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1306 (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1307 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1308 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1309 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1310 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1311 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1312 "invalid set base type", &N, T);
1313 }
1314 }
1315
1316 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1317 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1318 N.getRawBaseType());
1319
1320 if (N.getDWARFAddressSpace()) {
1321 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1322 N.getTag() == dwarf::DW_TAG_reference_type ||
1323 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1324 "DWARF address space only applies to pointer or reference types",
1325 &N);
1326 }
1327
1328 auto *Size = N.getRawSizeInBits();
1329 CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
1330 isa<DIExpression>(Size),
1331 "SizeInBits must be a constant or DIVariable or DIExpression");
1332}
1333
1334/// Detect mutually exclusive flags.
1335static bool hasConflictingReferenceFlags(unsigned Flags) {
1336 return ((Flags & DINode::FlagLValueReference) &&
1337 (Flags & DINode::FlagRValueReference)) ||
1338 ((Flags & DINode::FlagTypePassByValue) &&
1339 (Flags & DINode::FlagTypePassByReference));
1340}
1341
1342void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1343 auto *Params = dyn_cast<MDTuple>(&RawParams);
1344 CheckDI(Params, "invalid template params", &N, &RawParams);
1345 for (Metadata *Op : Params->operands()) {
1346 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1347 &N, Params, Op);
1348 }
1349}
1350
1351void Verifier::visitDICompositeType(const DICompositeType &N) {
1352 // Common scope checks.
1353 visitDIScope(N);
1354
1355 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1356 N.getTag() == dwarf::DW_TAG_structure_type ||
1357 N.getTag() == dwarf::DW_TAG_union_type ||
1358 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1359 N.getTag() == dwarf::DW_TAG_class_type ||
1360 N.getTag() == dwarf::DW_TAG_variant_part ||
1361 N.getTag() == dwarf::DW_TAG_variant ||
1362 N.getTag() == dwarf::DW_TAG_namelist,
1363 "invalid tag", &N);
1364
1365 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1366 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1367 N.getRawBaseType());
1368
1369 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1370 "invalid composite elements", &N, N.getRawElements());
1371 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1372 N.getRawVTableHolder());
1374 "invalid reference flags", &N);
1375 unsigned DIBlockByRefStruct = 1 << 4;
1376 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1377 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1378 CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1379 "DISubprogram contains null entry in `elements` field", &N);
1380
1381 if (N.isVector()) {
1382 const DINodeArray Elements = N.getElements();
1383 CheckDI(Elements.size() == 1 &&
1384 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1385 "invalid vector, expected one element of type subrange", &N);
1386 }
1387
1388 if (auto *Params = N.getRawTemplateParams())
1389 visitTemplateParams(N, *Params);
1390
1391 if (auto *D = N.getRawDiscriminator()) {
1392 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1393 "discriminator can only appear on variant part");
1394 }
1395
1396 if (N.getRawDataLocation()) {
1397 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1398 "dataLocation can only appear in array type");
1399 }
1400
1401 if (N.getRawAssociated()) {
1402 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1403 "associated can only appear in array type");
1404 }
1405
1406 if (N.getRawAllocated()) {
1407 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1408 "allocated can only appear in array type");
1409 }
1410
1411 if (N.getRawRank()) {
1412 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1413 "rank can only appear in array type");
1414 }
1415
1416 if (N.getTag() == dwarf::DW_TAG_array_type) {
1417 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1418 }
1419
1420 auto *Size = N.getRawSizeInBits();
1421 CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
1422 isa<DIExpression>(Size),
1423 "SizeInBits must be a constant or DIVariable or DIExpression");
1424}
1425
1426void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1427 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1428 if (auto *Types = N.getRawTypeArray()) {
1429 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1430 for (Metadata *Ty : N.getTypeArray()->operands()) {
1431 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1432 }
1433 }
1435 "invalid reference flags", &N);
1436}
1437
1438void Verifier::visitDIFile(const DIFile &N) {
1439 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1440 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1441 if (Checksum) {
1442 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1443 "invalid checksum kind", &N);
1444 size_t Size;
1445 switch (Checksum->Kind) {
1446 case DIFile::CSK_MD5:
1447 Size = 32;
1448 break;
1449 case DIFile::CSK_SHA1:
1450 Size = 40;
1451 break;
1452 case DIFile::CSK_SHA256:
1453 Size = 64;
1454 break;
1455 }
1456 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1457 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1458 "invalid checksum", &N);
1459 }
1460}
1461
1462void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1463 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1464 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1465
1466 // Don't bother verifying the compilation directory or producer string
1467 // as those could be empty.
1468 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1469 N.getRawFile());
1470 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1471 N.getFile());
1472
1473 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1474 "invalid emission kind", &N);
1475
1476 if (auto *Array = N.getRawEnumTypes()) {
1477 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1478 for (Metadata *Op : N.getEnumTypes()->operands()) {
1479 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1480 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1481 "invalid enum type", &N, N.getEnumTypes(), Op);
1482 }
1483 }
1484 if (auto *Array = N.getRawRetainedTypes()) {
1485 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1486 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1487 CheckDI(
1488 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1489 !cast<DISubprogram>(Op)->isDefinition())),
1490 "invalid retained type", &N, Op);
1491 }
1492 }
1493 if (auto *Array = N.getRawGlobalVariables()) {
1494 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1495 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1496 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1497 "invalid global variable ref", &N, Op);
1498 }
1499 }
1500 if (auto *Array = N.getRawImportedEntities()) {
1501 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1502 for (Metadata *Op : N.getImportedEntities()->operands()) {
1503 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1504 &N, Op);
1505 }
1506 }
1507 if (auto *Array = N.getRawMacros()) {
1508 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1509 for (Metadata *Op : N.getMacros()->operands()) {
1510 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1511 }
1512 }
1513 CUVisited.insert(&N);
1514}
1515
1516void Verifier::visitDISubprogram(const DISubprogram &N) {
1517 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1518 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1519 if (auto *F = N.getRawFile())
1520 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1521 else
1522 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1523 if (auto *T = N.getRawType())
1524 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1525 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1526 N.getRawContainingType());
1527 if (auto *Params = N.getRawTemplateParams())
1528 visitTemplateParams(N, *Params);
1529 if (auto *S = N.getRawDeclaration())
1530 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1531 "invalid subprogram declaration", &N, S);
1532 if (auto *RawNode = N.getRawRetainedNodes()) {
1533 auto *Node = dyn_cast<MDTuple>(RawNode);
1534 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1535 for (Metadata *Op : Node->operands()) {
1536 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1537 isa<DIImportedEntity>(Op)),
1538 "invalid retained nodes, expected DILocalVariable, DILabel or "
1539 "DIImportedEntity",
1540 &N, Node, Op);
1541 }
1542 }
1544 "invalid reference flags", &N);
1545
1546 auto *Unit = N.getRawUnit();
1547 if (N.isDefinition()) {
1548 // Subprogram definitions (not part of the type hierarchy).
1549 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1550 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1551 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1552 // There's no good way to cross the CU boundary to insert a nested
1553 // DISubprogram definition in one CU into a type defined in another CU.
1554 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1555 if (CT && CT->getRawIdentifier() &&
1556 M.getContext().isODRUniquingDebugTypes())
1557 CheckDI(N.getDeclaration(),
1558 "definition subprograms cannot be nested within DICompositeType "
1559 "when enabling ODR",
1560 &N);
1561 } else {
1562 // Subprogram declarations (part of the type hierarchy).
1563 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1564 CheckDI(!N.getRawDeclaration(),
1565 "subprogram declaration must not have a declaration field");
1566 }
1567
1568 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1569 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1570 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1571 for (Metadata *Op : ThrownTypes->operands())
1572 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1573 Op);
1574 }
1575
1576 if (N.areAllCallsDescribed())
1577 CheckDI(N.isDefinition(),
1578 "DIFlagAllCallsDescribed must be attached to a definition");
1579}
1580
1581void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1582 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1583 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1584 "invalid local scope", &N, N.getRawScope());
1585 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1586 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1587}
1588
1589void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1590 visitDILexicalBlockBase(N);
1591
1592 CheckDI(N.getLine() || !N.getColumn(),
1593 "cannot have column info without line info", &N);
1594}
1595
1596void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1597 visitDILexicalBlockBase(N);
1598}
1599
1600void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1601 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1602 if (auto *S = N.getRawScope())
1603 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1604 if (auto *S = N.getRawDecl())
1605 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1606}
1607
1608void Verifier::visitDINamespace(const DINamespace &N) {
1609 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1610 if (auto *S = N.getRawScope())
1611 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1612}
1613
1614void Verifier::visitDIMacro(const DIMacro &N) {
1615 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1616 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1617 "invalid macinfo type", &N);
1618 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1619 if (!N.getValue().empty()) {
1620 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1621 }
1622}
1623
1624void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1625 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1626 "invalid macinfo type", &N);
1627 if (auto *F = N.getRawFile())
1628 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1629
1630 if (auto *Array = N.getRawElements()) {
1631 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1632 for (Metadata *Op : N.getElements()->operands()) {
1633 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1634 }
1635 }
1636}
1637
1638void Verifier::visitDIModule(const DIModule &N) {
1639 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1640 CheckDI(!N.getName().empty(), "anonymous module", &N);
1641}
1642
1643void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1644 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1645}
1646
1647void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1648 visitDITemplateParameter(N);
1649
1650 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1651 &N);
1652}
1653
1654void Verifier::visitDITemplateValueParameter(
1655 const DITemplateValueParameter &N) {
1656 visitDITemplateParameter(N);
1657
1658 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1659 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1660 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1661 "invalid tag", &N);
1662}
1663
1664void Verifier::visitDIVariable(const DIVariable &N) {
1665 if (auto *S = N.getRawScope())
1666 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1667 if (auto *F = N.getRawFile())
1668 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1669}
1670
1671void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1672 // Checks common to all variables.
1673 visitDIVariable(N);
1674
1675 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1676 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1677 // Check only if the global variable is not an extern
1678 if (N.isDefinition())
1679 CheckDI(N.getType(), "missing global variable type", &N);
1680 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1681 CheckDI(isa<DIDerivedType>(Member),
1682 "invalid static data member declaration", &N, Member);
1683 }
1684}
1685
1686void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1687 // Checks common to all variables.
1688 visitDIVariable(N);
1689
1690 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1691 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1692 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1693 "local variable requires a valid scope", &N, N.getRawScope());
1694 if (auto Ty = N.getType())
1695 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1696}
1697
1698void Verifier::visitDIAssignID(const DIAssignID &N) {
1699 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1700 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1701}
1702
1703void Verifier::visitDILabel(const DILabel &N) {
1704 if (auto *S = N.getRawScope())
1705 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1706 if (auto *F = N.getRawFile())
1707 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1708
1709 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1710 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1711 "label requires a valid scope", &N, N.getRawScope());
1712}
1713
1714void Verifier::visitDIExpression(const DIExpression &N) {
1715 CheckDI(N.isValid(), "invalid expression", &N);
1716}
1717
1718void Verifier::visitDIGlobalVariableExpression(
1719 const DIGlobalVariableExpression &GVE) {
1720 CheckDI(GVE.getVariable(), "missing variable");
1721 if (auto *Var = GVE.getVariable())
1722 visitDIGlobalVariable(*Var);
1723 if (auto *Expr = GVE.getExpression()) {
1724 visitDIExpression(*Expr);
1725 if (auto Fragment = Expr->getFragmentInfo())
1726 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1727 }
1728}
1729
1730void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1731 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1732 if (auto *T = N.getRawType())
1733 CheckDI(isType(T), "invalid type ref", &N, T);
1734 if (auto *F = N.getRawFile())
1735 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1736}
1737
1738void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1739 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1740 N.getTag() == dwarf::DW_TAG_imported_declaration,
1741 "invalid tag", &N);
1742 if (auto *S = N.getRawScope())
1743 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1744 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1745 N.getRawEntity());
1746}
1747
1748void Verifier::visitComdat(const Comdat &C) {
1749 // In COFF the Module is invalid if the GlobalValue has private linkage.
1750 // Entities with private linkage don't have entries in the symbol table.
1751 if (TT.isOSBinFormatCOFF())
1752 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1753 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1754 GV);
1755}
1756
1757void Verifier::visitModuleIdents() {
1758 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1759 if (!Idents)
1760 return;
1761
1762 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1763 // Scan each llvm.ident entry and make sure that this requirement is met.
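 // For illustration (the version string is made up), a well-formed attachment
 // looks like:
 //   !llvm.ident = !{!0}
 //   !0 = !{!"clang version 19.0.0"}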
1764 for (const MDNode *N : Idents->operands()) {
1765 Check(N->getNumOperands() == 1,
1766 "incorrect number of operands in llvm.ident metadata", N);
1767 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1768 ("invalid value for llvm.ident metadata entry operand"
1769 "(the operand should be a string)"),
1770 N->getOperand(0));
1771 }
1772}
1773
1774void Verifier::visitModuleCommandLines() {
1775 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1776 if (!CommandLines)
1777 return;
1778
1779 // llvm.commandline takes a list of metadata entries. Each entry has only one
1780 // string. Scan each llvm.commandline entry and make sure that this
1781 // requirement is met.
1782 for (const MDNode *N : CommandLines->operands()) {
1783 Check(N->getNumOperands() == 1,
1784 "incorrect number of operands in llvm.commandline metadata", N);
1785 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1786 ("invalid value for llvm.commandline metadata entry operand"
1787 "(the operand should be a string)"),
1788 N->getOperand(0));
1789 }
1790}
1791
1792void Verifier::visitModuleFlags() {
1793 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1794 if (!Flags) return;
1795
1796 // Scan each flag, and track the flags and requirements.
1797 DenseMap<const MDString*, const MDNode*> SeenIDs;
1798 SmallVector<const MDNode*, 16> Requirements;
1799 uint64_t PAuthABIPlatform = -1;
1800 uint64_t PAuthABIVersion = -1;
1801 for (const MDNode *MDN : Flags->operands()) {
1802 visitModuleFlag(MDN, SeenIDs, Requirements);
1803 if (MDN->getNumOperands() != 3)
1804 continue;
1805 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1806 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1807 if (const auto *PAP =
1808 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1809 PAuthABIPlatform = PAP->getZExtValue();
1810 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1811 if (const auto *PAV =
1812 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1813 PAuthABIVersion = PAV->getZExtValue();
1814 }
1815 }
1816 }
1817
1818 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1819 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1820 "'aarch64-elf-pauthabi-version' module flags must be present");
1821
1822 // Validate that the requirements in the module are valid.
1823 for (const MDNode *Requirement : Requirements) {
1824 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1825 const Metadata *ReqValue = Requirement->getOperand(1);
1826
1827 const MDNode *Op = SeenIDs.lookup(Flag);
1828 if (!Op) {
1829 CheckFailed("invalid requirement on flag, flag is not present in module",
1830 Flag);
1831 continue;
1832 }
1833
1834 if (Op->getOperand(2) != ReqValue) {
1835 CheckFailed(("invalid requirement on flag, "
1836 "flag does not have the required value"),
1837 Flag);
1838 continue;
1839 }
1840 }
1841}
1842
1843void
1844Verifier::visitModuleFlag(const MDNode *Op,
1845 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1846 SmallVectorImpl<const MDNode *> &Requirements) {
1847 // Each module flag should have three arguments, the merge behavior (a
1848 // constant int), the flag ID (an MDString), and the value.
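 // For illustration (the flag name and value are just examples), a well-formed
 // module flag looks like !{i32 1, !"wchar_size", i32 4}, where 'i32 1' is the
 // Error merge behavior.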
1849 Check(Op->getNumOperands() == 3,
1850 "incorrect number of operands in module flag", Op);
1852 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1853 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1854 "invalid behavior operand in module flag (expected constant integer)",
1855 Op->getOperand(0));
1856 Check(false,
1857 "invalid behavior operand in module flag (unexpected constant)",
1858 Op->getOperand(0));
1859 }
1860 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1861 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1862 Op->getOperand(1));
1863
1864 // Check the values for behaviors with additional requirements.
1865 switch (MFB) {
1866 case Module::Error:
1867 case Module::Warning:
1868 case Module::Override:
1869 // These behavior types accept any value.
1870 break;
1871
1872 case Module::Min: {
1873 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1874 Check(V && V->getValue().isNonNegative(),
1875 "invalid value for 'min' module flag (expected constant non-negative "
1876 "integer)",
1877 Op->getOperand(2));
1878 break;
1879 }
1880
1881 case Module::Max: {
1882 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1883 "invalid value for 'max' module flag (expected constant integer)",
1884 Op->getOperand(2));
1885 break;
1886 }
1887
1888 case Module::Require: {
1889 // The value should itself be an MDNode with two operands, a flag ID (an
1890 // MDString), and a value.
1891 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1892 Check(Value && Value->getNumOperands() == 2,
1893 "invalid value for 'require' module flag (expected metadata pair)",
1894 Op->getOperand(2));
1895 Check(isa<MDString>(Value->getOperand(0)),
1896 ("invalid value for 'require' module flag "
1897 "(first value operand should be a string)"),
1898 Value->getOperand(0));
1899
1900 // Append it to the list of requirements, to check once all module flags are
1901 // scanned.
1902 Requirements.push_back(Value);
1903 break;
1904 }
1905
1906 case Module::Append:
1907 case Module::AppendUnique: {
1908 // These behavior types require the operand be an MDNode.
1909 Check(isa<MDNode>(Op->getOperand(2)),
1910 "invalid value for 'append'-type module flag "
1911 "(expected a metadata node)",
1912 Op->getOperand(2));
1913 break;
1914 }
1915 }
1916
1917 // Unless this is a "requires" flag, check the ID is unique.
1918 if (MFB != Module::Require) {
1919 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1920 Check(Inserted,
1921 "module flag identifiers must be unique (or of 'require' type)", ID);
1922 }
1923
1924 if (ID->getString() == "wchar_size") {
1925 ConstantInt *Value
1926 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1927 Check(Value, "wchar_size metadata requires constant integer argument");
1928 }
1929
1930 if (ID->getString() == "Linker Options") {
1931 // If the llvm.linker.options named metadata exists, we assume that the
1932 // bitcode reader has upgraded the module flag. Otherwise the flag might
1933 // have been created by a client directly.
1934 Check(M.getNamedMetadata("llvm.linker.options"),
1935 "'Linker Options' named metadata no longer supported");
1936 }
1937
1938 if (ID->getString() == "SemanticInterposition") {
1939 ConstantInt *Value =
1940 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1941 Check(Value,
1942 "SemanticInterposition metadata requires constant integer argument");
1943 }
1944
1945 if (ID->getString() == "CG Profile") {
1946 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1947 visitModuleFlagCGProfileEntry(MDO);
1948 }
1949}
1950
1951void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
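 // Each entry is expected to be a (caller, callee, count) triple, e.g.
 // !{ptr @from, ptr @to, i64 2000} (names and count are illustrative); either
 // function operand may be null.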
1952 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1953 if (!FuncMDO)
1954 return;
1955 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1956 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1957 "expected a Function or null", FuncMDO);
1958 };
1959 auto Node = dyn_cast_or_null<MDNode>(MDO);
1960 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1961 CheckFunction(Node->getOperand(0));
1962 CheckFunction(Node->getOperand(1));
1963 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1964 Check(Count && Count->getType()->isIntegerTy(),
1965 "expected an integer constant", Node->getOperand(2));
1966}
1967
1968void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1969 for (Attribute A : Attrs) {
1970
1971 if (A.isStringAttribute()) {
1972#define GET_ATTR_NAMES
1973#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1974#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1975 if (A.getKindAsString() == #DISPLAY_NAME) { \
1976 auto V = A.getValueAsString(); \
1977 if (!(V.empty() || V == "true" || V == "false")) \
1978 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1979 ""); \
1980 }
1981
1982#include "llvm/IR/Attributes.inc"
1983 continue;
1984 }
1985
1986 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1987 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1988 V);
1989 return;
1990 }
1991 }
1992}
1993
1994// VerifyParameterAttrs - Check the given attributes for an argument or return
1995// value of the specified type. The value V is printed in error messages.
1996void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1997 const Value *V) {
1998 if (!Attrs.hasAttributes())
1999 return;
2000
2001 verifyAttributeTypes(Attrs, V);
2002
2003 for (Attribute Attr : Attrs)
2004 Check(Attr.isStringAttribute() ||
2005 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2006 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2007 V);
2008
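 // 'immarg' may only be combined with 'range'. hasAttribute() converts to 0 or
 // 1, so after discounting a possible 'range' attribute exactly one attribute
 // ('immarg' itself) must remain.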
2009 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2010 unsigned AttrCount =
2011 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2012 Check(AttrCount == 1,
2013 "Attribute 'immarg' is incompatible with other attributes except the "
2014 "'range' attribute",
2015 V);
2016 }
2017
2018 // Check for mutually incompatible attributes. Only inreg is compatible with
2019 // sret.
2020 unsigned AttrCount = 0;
2021 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2022 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2023 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2024 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2025 Attrs.hasAttribute(Attribute::InReg);
2026 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2027 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2028 Check(AttrCount <= 1,
2029 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2030 "'byref', and 'sret' are incompatible!",
2031 V);
2032
2033 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2034 Attrs.hasAttribute(Attribute::ReadOnly)),
2035 "Attributes "
2036 "'inalloca and readonly' are incompatible!",
2037 V);
2038
2039 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2040 Attrs.hasAttribute(Attribute::Returned)),
2041 "Attributes "
2042 "'sret and returned' are incompatible!",
2043 V);
2044
2045 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2046 Attrs.hasAttribute(Attribute::SExt)),
2047 "Attributes "
2048 "'zeroext and signext' are incompatible!",
2049 V);
2050
2051 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2052 Attrs.hasAttribute(Attribute::ReadOnly)),
2053 "Attributes "
2054 "'readnone and readonly' are incompatible!",
2055 V);
2056
2057 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2058 Attrs.hasAttribute(Attribute::WriteOnly)),
2059 "Attributes "
2060 "'readnone and writeonly' are incompatible!",
2061 V);
2062
2063 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2064 Attrs.hasAttribute(Attribute::WriteOnly)),
2065 "Attributes "
2066 "'readonly and writeonly' are incompatible!",
2067 V);
2068
2069 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2070 Attrs.hasAttribute(Attribute::AlwaysInline)),
2071 "Attributes "
2072 "'noinline and alwaysinline' are incompatible!",
2073 V);
2074
2075 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2076 Attrs.hasAttribute(Attribute::ReadNone)),
2077 "Attributes writable and readnone are incompatible!", V);
2078
2079 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2080 Attrs.hasAttribute(Attribute::ReadOnly)),
2081 "Attributes writable and readonly are incompatible!", V);
2082
2083 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2084 for (Attribute Attr : Attrs) {
2085 if (!Attr.isStringAttribute() &&
2086 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2087 CheckFailed("Attribute '" + Attr.getAsString() +
2088 "' applied to incompatible type!", V);
2089 return;
2090 }
2091 }
2092
2093 if (isa<PointerType>(Ty)) {
2094 if (Attrs.hasAttribute(Attribute::Alignment)) {
2095 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2096 Check(AttrAlign.value() <= Value::MaximumAlignment,
2097 "huge alignment values are unsupported", V);
2098 }
2099 if (Attrs.hasAttribute(Attribute::ByVal)) {
2100 Type *ByValTy = Attrs.getByValType();
2101 SmallPtrSet<Type *, 4> Visited;
2102 Check(ByValTy->isSized(&Visited),
2103 "Attribute 'byval' does not support unsized types!", V);
2104 // Check if it is or contains a target extension type that disallows being
2105 // used on the stack.
2106 Check(!ByValTy->containsNonLocalTargetExtType(),
2107 "'byval' argument has illegal target extension type", V);
2108 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2109 "huge 'byval' arguments are unsupported", V);
2110 }
2111 if (Attrs.hasAttribute(Attribute::ByRef)) {
2112 SmallPtrSet<Type *, 4> Visited;
2113 Check(Attrs.getByRefType()->isSized(&Visited),
2114 "Attribute 'byref' does not support unsized types!", V);
2115 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2116 (1ULL << 32),
2117 "huge 'byref' arguments are unsupported", V);
2118 }
2119 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2120 SmallPtrSet<Type *, 4> Visited;
2121 Check(Attrs.getInAllocaType()->isSized(&Visited),
2122 "Attribute 'inalloca' does not support unsized types!", V);
2123 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2124 (1ULL << 32),
2125 "huge 'inalloca' arguments are unsupported", V);
2126 }
2127 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2128 SmallPtrSet<Type *, 4> Visited;
2129 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2130 "Attribute 'preallocated' does not support unsized types!", V);
2131 Check(
2132 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2133 (1ULL << 32),
2134 "huge 'preallocated' arguments are unsupported", V);
2135 }
2136 }
2137
2138 if (Attrs.hasAttribute(Attribute::Initializes)) {
2139 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2140 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2141 V);
2142 Check(ConstantRangeList::isOrderedRanges(Inits),
2143 "Attribute 'initializes' does not support unordered ranges", V);
2144 }
2145
2146 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2147 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2148 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2149 V);
2150 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2151 "Invalid value for 'nofpclass' test mask", V);
2152 }
2153 if (Attrs.hasAttribute(Attribute::Range)) {
2154 const ConstantRange &CR =
2155 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2156 Check(Ty->isIntOrIntVectorTy(CR.getBitWidth()),
2157 "Range bit width must match type bit width!", V);
2158 }
2159}
2160
2161void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2162 const Value *V) {
2163 if (Attrs.hasFnAttr(Attr)) {
2164 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2165 unsigned N;
2166 if (S.getAsInteger(10, N))
2167 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2168 }
2169}
2170
2171// Check parameter attributes against a function type.
2172// The value V is printed in error messages.
2173void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2174 const Value *V, bool IsIntrinsic,
2175 bool IsInlineAsm) {
2176 if (Attrs.isEmpty())
2177 return;
2178
2179 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2180 Check(Attrs.hasParentContext(Context),
2181 "Attribute list does not match Module context!", &Attrs, V);
2182 for (const auto &AttrSet : Attrs) {
2183 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2184 "Attribute set does not match Module context!", &AttrSet, V);
2185 for (const auto &A : AttrSet) {
2186 Check(A.hasParentContext(Context),
2187 "Attribute does not match Module context!", &A, V);
2188 }
2189 }
2190 }
2191
2192 bool SawNest = false;
2193 bool SawReturned = false;
2194 bool SawSRet = false;
2195 bool SawSwiftSelf = false;
2196 bool SawSwiftAsync = false;
2197 bool SawSwiftError = false;
2198
2199 // Verify return value attributes.
2200 AttributeSet RetAttrs = Attrs.getRetAttrs();
2201 for (Attribute RetAttr : RetAttrs)
2202 Check(RetAttr.isStringAttribute() ||
2203 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2204 "Attribute '" + RetAttr.getAsString() +
2205 "' does not apply to function return values",
2206 V);
2207
2208 unsigned MaxParameterWidth = 0;
2209 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2210 if (Ty->isVectorTy()) {
2211 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2212 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2213 if (Size > MaxParameterWidth)
2214 MaxParameterWidth = Size;
2215 }
2216 }
2217 };
2218 GetMaxParameterWidth(FT->getReturnType());
2219 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2220
2221 // Verify parameter attributes.
2222 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2223 Type *Ty = FT->getParamType(i);
2224 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2225
2226 if (!IsIntrinsic) {
2227 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2228 "immarg attribute only applies to intrinsics", V);
2229 if (!IsInlineAsm)
2230 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2231 "Attribute 'elementtype' can only be applied to intrinsics"
2232 " and inline asm.",
2233 V);
2234 }
2235
2236 verifyParameterAttrs(ArgAttrs, Ty, V);
2237 GetMaxParameterWidth(Ty);
2238
2239 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2240 Check(!SawNest, "More than one parameter has attribute nest!", V);
2241 SawNest = true;
2242 }
2243
2244 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2245 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2246 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2247 "Incompatible argument and return types for 'returned' attribute",
2248 V);
2249 SawReturned = true;
2250 }
2251
2252 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2253 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2254 Check(i == 0 || i == 1,
2255 "Attribute 'sret' is not on first or second parameter!", V);
2256 SawSRet = true;
2257 }
2258
2259 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2260 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2261 SawSwiftSelf = true;
2262 }
2263
2264 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2265 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2266 SawSwiftAsync = true;
2267 }
2268
2269 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2270 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2271 SawSwiftError = true;
2272 }
2273
2274 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2275 Check(i == FT->getNumParams() - 1,
2276 "inalloca isn't on the last parameter!", V);
2277 }
2278 }
2279
2280 if (!Attrs.hasFnAttrs())
2281 return;
2282
2283 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2284 for (Attribute FnAttr : Attrs.getFnAttrs())
2285 Check(FnAttr.isStringAttribute() ||
2286 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2287 "Attribute '" + FnAttr.getAsString() +
2288 "' does not apply to functions!",
2289 V);
2290
2291 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2292 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2293 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2294
2295 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2296 Check(Attrs.hasFnAttr(Attribute::NoInline),
2297 "Attribute 'optnone' requires 'noinline'!", V);
2298
2299 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2300 "Attributes 'optsize and optnone' are incompatible!", V);
2301
2302 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2303 "Attributes 'minsize and optnone' are incompatible!", V);
2304
2305 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2306 "Attributes 'optdebug and optnone' are incompatible!", V);
2307 }
2308
2309 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2310 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2311 "Attributes "
2312 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2313 V);
2314
2315 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2316 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2317 "Attributes 'optsize and optdebug' are incompatible!", V);
2318
2319 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2320 "Attributes 'minsize and optdebug' are incompatible!", V);
2321 }
2322
2323 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2324 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2325 "Attribute writable and memory without argmem: write are incompatible!",
2326 V);
2327
2328 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2329 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2330 "Attributes 'aarch64_pstate_sm_enabled and "
2331 "aarch64_pstate_sm_compatible' are incompatible!",
2332 V);
2333 }
2334
2335 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2336 Attrs.hasFnAttr("aarch64_inout_za") +
2337 Attrs.hasFnAttr("aarch64_out_za") +
2338 Attrs.hasFnAttr("aarch64_preserves_za") +
2339 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2340 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2341 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2342 "'aarch64_za_state_agnostic' are mutually exclusive",
2343 V);
2344
2345 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2346 Attrs.hasFnAttr("aarch64_in_zt0") +
2347 Attrs.hasFnAttr("aarch64_inout_zt0") +
2348 Attrs.hasFnAttr("aarch64_out_zt0") +
2349 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2350 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2351 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2352 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2353 "'aarch64_za_state_agnostic' are mutually exclusive",
2354 V);
2355
2356 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2357 const GlobalValue *GV = cast<GlobalValue>(V);
2358 Check(GV->hasGlobalUnnamedAddr(),
2359 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2360 }
2361
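 // For illustration, 'allocsize(0, 1)' marks parameter 0 as the element size
 // and parameter 1 as the element count; both indices must refer to integer
 // parameters of the function type.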
2362 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2363 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2364 if (ParamNo >= FT->getNumParams()) {
2365 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2366 return false;
2367 }
2368
2369 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2370 CheckFailed("'allocsize' " + Name +
2371 " argument must refer to an integer parameter",
2372 V);
2373 return false;
2374 }
2375
2376 return true;
2377 };
2378
2379 if (!CheckParam("element size", Args->first))
2380 return;
2381
2382 if (Args->second && !CheckParam("number of elements", *Args->second))
2383 return;
2384 }
2385
2386 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2387 AllocFnKind K = Attrs.getAllocKind();
2388 AllocFnKind Type =
2389 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2390 if (!is_contained(
2391 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2392 Type))
2393 CheckFailed(
2394 "'allockind()' requires exactly one of alloc, realloc, and free");
2395 if ((Type == AllocFnKind::Free) &&
2396 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2397 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2398 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2399 "or aligned modifiers.");
2400 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2401 if ((K & ZeroedUninit) == ZeroedUninit)
2402 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2403 }
2404
2405 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2406 StringRef S = A.getValueAsString();
2407 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2408 Function *Variant = M.getFunction(S);
2409 if (Variant) {
2410 Attribute Family = Attrs.getFnAttr("alloc-family");
2411 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2412 if (Family.isValid())
2413 Check(VariantFamily.isValid() &&
2414 VariantFamily.getValueAsString() == Family.getValueAsString(),
2415 "'alloc-variant-zeroed' must name a function belonging to the "
2416 "same 'alloc-family'");
2417
2418 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2419 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2420 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2421 "'alloc-variant-zeroed' must name a function with "
2422 "'allockind(\"zeroed\")'");
2423
2424 Check(FT == Variant->getFunctionType(),
2425 "'alloc-variant-zeroed' must name a function with the same "
2426 "signature");
2427 }
2428 }
2429
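 // For illustration, 'vscale_range(2, 16)' is well formed: the minimum must be
 // a non-zero power of two and, when a maximum is given, must not exceed it;
 // the maximum must also be a power of two.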
2430 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2431 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2432 if (VScaleMin == 0)
2433 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2434 else if (!isPowerOf2_32(VScaleMin))
2435 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2436 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2437 if (VScaleMax && VScaleMin > VScaleMax)
2438 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2439 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2440 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2441 }
2442
2443 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2444 StringRef FP = FPAttr.getValueAsString();
2445 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2446 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2447 }
2448
2449 // Check EVEX512 feature.
2450 if (TT.isX86() && MaxParameterWidth >= 512) {
2451 Attribute TargetFeaturesAttr = Attrs.getFnAttr("target-features");
2452 if (TargetFeaturesAttr.isValid()) {
2453 StringRef TF = TargetFeaturesAttr.getValueAsString();
2454 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2455 "512-bit vector arguments require 'evex512' for AVX512", V);
2456 }
2457 }
2458
2459 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2460 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2461 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2462 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2463 .getValueAsString()
2464 .empty(),
2465 "\"patchable-function-entry-section\" must not be empty");
2466 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2467
2468 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2469 StringRef S = A.getValueAsString();
2470 if (S != "none" && S != "all" && S != "non-leaf")
2471 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2472 }
2473
2474 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2475 StringRef S = A.getValueAsString();
2476 if (S != "a_key" && S != "b_key")
2477 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2478 V);
2479 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2480 CheckFailed(
2481 "'sign-return-address-key' present without `sign-return-address`");
2482 }
2483 }
2484
2485 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2486 StringRef S = A.getValueAsString();
2487 if (S != "" && S != "true" && S != "false")
2488 CheckFailed(
2489 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2490 }
2491
2492 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2493 StringRef S = A.getValueAsString();
2494 if (S != "" && S != "true" && S != "false")
2495 CheckFailed(
2496 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2497 }
2498
2499 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2500 StringRef S = A.getValueAsString();
2501 if (S != "" && S != "true" && S != "false")
2502 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2503 V);
2504 }
2505
2506 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2507 StringRef S = A.getValueAsString();
2508 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2509 if (!Info)
2510 CheckFailed("invalid name for a VFABI variant: " + S, V);
2511 }
2512
2513 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2514 StringRef S = A.getValueAsString();
2515 if (!parseDenormalFPAttribute(S).isValid())
2516 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2517 }
2518
2519 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2520 StringRef S = A.getValueAsString();
2521 if (!parseDenormalFPAttribute(S).isValid())
2522 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2523 V);
2524 }
2525}
2526
2527void Verifier::verifyFunctionMetadata(
2528 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2529 for (const auto &Pair : MDs) {
2530 if (Pair.first == LLVMContext::MD_prof) {
2531 MDNode *MD = Pair.second;
2532 if (isExplicitlyUnknownBranchWeightsMetadata(*MD)) {
2533 CheckFailed("'unknown' !prof metadata should appear only on "
2534 "instructions supporting the 'branch_weights' metadata",
2535 MD);
2536 continue;
2537 }
2538 Check(MD->getNumOperands() >= 2,
2539 "!prof annotations should have no less than 2 operands", MD);
2540
2541 // Check first operand.
2542 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2543 MD);
2544 Check(isa<MDString>(MD->getOperand(0)),
2545 "expected string with name of the !prof annotation", MD);
2546 MDString *MDS = cast<MDString>(MD->getOperand(0));
2547 StringRef ProfName = MDS->getString();
2548 Check(ProfName == "function_entry_count" ||
2549 ProfName == "synthetic_function_entry_count",
2550 "first operand should be 'function_entry_count'"
2551 " or 'synthetic_function_entry_count'",
2552 MD);
2553
2554 // Check second operand.
2555 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2556 MD);
2557 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2558 "expected integer argument to function_entry_count", MD);
2559 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
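 // !kcfi_type is expected to wrap a single 32-bit integer constant, e.g.
 // !{i32 12345678} (the value shown is illustrative).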
2560 MDNode *MD = Pair.second;
2561 Check(MD->getNumOperands() == 1,
2562 "!kcfi_type must have exactly one operand", MD);
2563 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2564 MD);
2565 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2566 "expected a constant operand for !kcfi_type", MD);
2567 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2568 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2569 "expected a constant integer operand for !kcfi_type", MD);
2570 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2571 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2572 }
2573 }
2574}
2575
2576void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2577 if (!ConstantExprVisited.insert(EntryC).second)
2578 return;
2579
2580 SmallVector<const Constant *, 16> Stack;
2581 Stack.push_back(EntryC);
2582
2583 while (!Stack.empty()) {
2584 const Constant *C = Stack.pop_back_val();
2585
2586 // Check this constant expression.
2587 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2588 visitConstantExpr(CE);
2589
2590 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2591 visitConstantPtrAuth(CPA);
2592
2593 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2594 // Global Values get visited separately, but we do need to make sure
2595 // that the global value is in the correct module
2596 Check(GV->getParent() == &M, "Referencing global in another module!",
2597 EntryC, &M, GV, GV->getParent());
2598 continue;
2599 }
2600
2601 // Visit all sub-expressions.
2602 for (const Use &U : C->operands()) {
2603 const auto *OpC = dyn_cast<Constant>(U);
2604 if (!OpC)
2605 continue;
2606 if (!ConstantExprVisited.insert(OpC).second)
2607 continue;
2608 Stack.push_back(OpC);
2609 }
2610 }
2611}
2612
2613void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2614 if (CE->getOpcode() == Instruction::BitCast)
2615 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2616 CE->getType()),
2617 "Invalid bitcast", CE);
2618 else if (CE->getOpcode() == Instruction::PtrToAddr)
2619 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2620}
2621
2622void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2623 Check(CPA->getPointer()->getType()->isPointerTy(),
2624 "signed ptrauth constant base pointer must have pointer type");
2625
2626 Check(CPA->getType() == CPA->getPointer()->getType(),
2627 "signed ptrauth constant must have same type as its base pointer");
2628
2629 Check(CPA->getKey()->getBitWidth() == 32,
2630 "signed ptrauth constant key must be i32 constant integer");
2631
2632 Check(CPA->getAddrDiscriminator()->getType()->isPointerTy(),
2633 "signed ptrauth constant address discriminator must be a pointer");
2634
2635 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2636 "signed ptrauth constant discriminator must be i64 constant integer");
2637}
2638
2639bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2640 // There shouldn't be more attribute sets than there are parameters plus the
2641 // function and return value.
2642 return Attrs.getNumAttrSets() <= Params + 2;
2643}
2644
2645void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2646 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2647 unsigned ArgNo = 0;
2648 unsigned LabelNo = 0;
2649 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2650 if (CI.Type == InlineAsm::isLabel) {
2651 ++LabelNo;
2652 continue;
2653 }
2654
2655 // Only deal with constraints that correspond to call arguments.
2656 if (!CI.hasArg())
2657 continue;
2658
2659 if (CI.isIndirect) {
2660 const Value *Arg = Call.getArgOperand(ArgNo);
2661 Check(Arg->getType()->isPointerTy(),
2662 "Operand for indirect constraint must have pointer type", &Call);
2663
2664 Check(Call.getParamElementType(ArgNo),
2665 "Operand for indirect constraint must have elementtype attribute",
2666 &Call);
2667 } else {
2668 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2669 "Elementtype attribute can only be applied for indirect "
2670 "constraints",
2671 &Call);
2672 }
2673
2674 ArgNo++;
2675 }
2676
2677 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2678 Check(LabelNo == CallBr->getNumIndirectDests(),
2679 "Number of label constraints does not match number of callbr dests",
2680 &Call);
2681 } else {
2682 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2683 &Call);
2684 }
2685}
2686
2687/// Verify that statepoint intrinsic is well formed.
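/// A statepoint call is expected to look roughly like (illustrative):
///   %tok = call token (i64, i32, ptr, i32, i32, ...)
///       @llvm.experimental.gc.statepoint.p0(i64 <id>, i32 <num patch bytes>,
///           ptr elementtype(<fn type>) <target>, i32 <num call args>,
///           i32 <flags>, <call args>..., i32 0, i32 0)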
2688void Verifier::verifyStatepoint(const CallBase &Call) {
2689 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2690
2691 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2692 !Call.onlyAccessesArgMemory(),
2693 "gc.statepoint must read and write all memory to preserve "
2694 "reordering restrictions required by safepoint semantics",
2695 Call);
2696
2697 const int64_t NumPatchBytes =
2698 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2699 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2700 Check(NumPatchBytes >= 0,
2701 "gc.statepoint number of patchable bytes must be "
2702 "positive",
2703 Call);
2704
2705 Type *TargetElemType = Call.getParamElementType(2);
2706 Check(TargetElemType,
2707 "gc.statepoint callee argument must have elementtype attribute", Call);
2708 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2709 Check(TargetFuncType,
2710 "gc.statepoint callee elementtype must be function type", Call);
2711
2712 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2713 Check(NumCallArgs >= 0,
2714 "gc.statepoint number of arguments to underlying call "
2715 "must be positive",
2716 Call);
2717 const int NumParams = (int)TargetFuncType->getNumParams();
2718 if (TargetFuncType->isVarArg()) {
2719 Check(NumCallArgs >= NumParams,
2720 "gc.statepoint mismatch in number of vararg call args", Call);
2721
2722 // TODO: Remove this limitation
2723 Check(TargetFuncType->getReturnType()->isVoidTy(),
2724 "gc.statepoint doesn't support wrapping non-void "
2725 "vararg functions yet",
2726 Call);
2727 } else
2728 Check(NumCallArgs == NumParams,
2729 "gc.statepoint mismatch in number of call args", Call);
2730
2731 const uint64_t Flags
2732 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2733 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2734 "unknown flag used in gc.statepoint flags argument", Call);
2735
2736 // Verify that the types of the call parameter arguments match
2737 // the type of the wrapped callee.
2738 AttributeList Attrs = Call.getAttributes();
2739 for (int i = 0; i < NumParams; i++) {
2740 Type *ParamType = TargetFuncType->getParamType(i);
2741 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2742 Check(ArgType == ParamType,
2743 "gc.statepoint call argument does not match wrapped "
2744 "function type",
2745 Call);
2746
2747 if (TargetFuncType->isVarArg()) {
2748 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2749 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2750 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2751 }
2752 }
2753
2754 const int EndCallArgsInx = 4 + NumCallArgs;
2755
2756 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2757 Check(isa<ConstantInt>(NumTransitionArgsV),
2758 "gc.statepoint number of transition arguments "
2759 "must be constant integer",
2760 Call);
2761 const int NumTransitionArgs =
2762 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2763 Check(NumTransitionArgs == 0,
2764 "gc.statepoint w/inline transition bundle is deprecated", Call);
2765 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2766
2767 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2768 Check(isa<ConstantInt>(NumDeoptArgsV),
2769 "gc.statepoint number of deoptimization arguments "
2770 "must be constant integer",
2771 Call);
2772 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2773 Check(NumDeoptArgs == 0,
2774 "gc.statepoint w/inline deopt operands is deprecated", Call);
2775
2776 const int ExpectedNumArgs = 7 + NumCallArgs;
2777 Check(ExpectedNumArgs == (int)Call.arg_size(),
2778 "gc.statepoint too many arguments", Call);
2779
2780 // Check that the only uses of this gc.statepoint are gc.result or
2781 // gc.relocate calls which are tied to this statepoint and thus part
2782 // of the same statepoint sequence
2783 for (const User *U : Call.users()) {
2784 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2785 Check(UserCall, "illegal use of statepoint token", Call, U);
2786 if (!UserCall)
2787 continue;
2788 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2789 "gc.result or gc.relocate are the only value uses "
2790 "of a gc.statepoint",
2791 Call, U);
2792 if (isa<GCResultInst>(UserCall)) {
2793 Check(UserCall->getArgOperand(0) == &Call,
2794 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2795 } else if (isa<GCRelocateInst>(UserCall)) {
2796 Check(UserCall->getArgOperand(0) == &Call,
2797 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2798 }
2799 }
2800
2801 // Note: It is legal for a single derived pointer to be listed multiple
2802 // times. It's non-optimal, but it is legal. It can also happen after
2803 // insertion if we strip a bitcast away.
2804 // Note: It is really tempting to check that each base is relocated and
2805 // that a derived pointer is never reused as a base pointer. This turns
2806 // out to be problematic since optimizations run after safepoint insertion
2807 // can recognize equality properties that the insertion logic doesn't know
2808 // about. See example statepoint.ll in the verifier subdirectory
2809}
2810
2811void Verifier::verifyFrameRecoverIndices() {
2812 for (auto &Counts : FrameEscapeInfo) {
2813 Function *F = Counts.first;
2814 unsigned EscapedObjectCount = Counts.second.first;
2815 unsigned MaxRecoveredIndex = Counts.second.second;
2816 Check(MaxRecoveredIndex <= EscapedObjectCount,
2817 "all indices passed to llvm.localrecover must be less than the "
2818 "number of arguments passed to llvm.localescape in the parent "
2819 "function",
2820 F);
2821 }
2822}
2823
2824static Instruction *getSuccPad(Instruction *Terminator) {
2825 BasicBlock *UnwindDest;
2826 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2827 UnwindDest = II->getUnwindDest();
2828 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2829 UnwindDest = CSI->getUnwindDest();
2830 else
2831 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2832 return &*UnwindDest->getFirstNonPHIIt();
2833}
2834
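// Verify that no EH pad (transitively) unwinds to a sibling funclet that in
// turn unwinds back to it; such a cycle would mean the pads handle each
// other's exceptions.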
2835void Verifier::verifySiblingFuncletUnwinds() {
2836 SmallPtrSet<Instruction *, 8> Visited;
2837 SmallPtrSet<Instruction *, 8> Active;
2838 for (const auto &Pair : SiblingFuncletInfo) {
2839 Instruction *PredPad = Pair.first;
2840 if (Visited.count(PredPad))
2841 continue;
2842 Active.insert(PredPad);
2843 Instruction *Terminator = Pair.second;
2844 do {
2845 Instruction *SuccPad = getSuccPad(Terminator);
2846 if (Active.count(SuccPad)) {
2847 // Found a cycle; report error
2848 Instruction *CyclePad = SuccPad;
2849 SmallVector<Instruction *, 8> CycleNodes;
2850 do {
2851 CycleNodes.push_back(CyclePad);
2852 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2853 if (CycleTerminator != CyclePad)
2854 CycleNodes.push_back(CycleTerminator);
2855 CyclePad = getSuccPad(CycleTerminator);
2856 } while (CyclePad != SuccPad);
2857 Check(false, "EH pads can't handle each other's exceptions",
2858 ArrayRef<Instruction *>(CycleNodes));
2859 }
2860 // Don't re-walk a node we've already checked
2861 if (!Visited.insert(SuccPad).second)
2862 break;
2863 // Walk to this successor if it has a map entry.
2864 PredPad = SuccPad;
2865 auto TermI = SiblingFuncletInfo.find(PredPad);
2866 if (TermI == SiblingFuncletInfo.end())
2867 break;
2868 Terminator = TermI->second;
2869 Active.insert(PredPad);
2870 } while (true);
2871 // Each node only has one successor, so we've walked all the active
2872 // nodes' successors.
2873 Active.clear();
2874 }
2875}
2876
2877// visitFunction - Verify that a function is ok.
2878//
2879void Verifier::visitFunction(const Function &F) {
2880 visitGlobalValue(F);
2881
2882 // Check function arguments.
2883 FunctionType *FT = F.getFunctionType();
2884 unsigned NumArgs = F.arg_size();
2885
2886 Check(&Context == &F.getContext(),
2887 "Function context does not match Module context!", &F);
2888
2889 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2890 Check(FT->getNumParams() == NumArgs,
2891 "# formal arguments must match # of arguments for function type!", &F,
2892 FT);
2893 Check(F.getReturnType()->isFirstClassType() ||
2894 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2895 "Functions cannot return aggregate values!", &F);
2896
2897 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2898 "Invalid struct return type!", &F);
2899
2900 if (MaybeAlign A = F.getAlign()) {
2901 Check(A->value() <= Value::MaximumAlignment,
2902 "huge alignment values are unsupported", &F);
2903 }
2904
2905 AttributeList Attrs = F.getAttributes();
2906
2907 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2908 "Attribute after last parameter!", &F);
2909
2910 bool IsIntrinsic = F.isIntrinsic();
2911
2912 // Check function attributes.
2913 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2914
2915 // On function declarations/definitions, we do not support the builtin
2916 // attribute. We do not check this in VerifyFunctionAttrs since that is
2917 // checking for Attributes that can/can not ever be on functions.
2918 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2919 "Attribute 'builtin' can only be applied to a callsite.", &F);
2920
2921 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2922 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2923
2924 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2925 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2926
2927 if (Attrs.hasFnAttr(Attribute::Naked))
2928 for (const Argument &Arg : F.args())
2929 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2930
2931 // Check that this function meets the restrictions on this calling convention.
2932 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2933 // restrictions can be lifted.
2934 switch (F.getCallingConv()) {
2935 default:
2936 case CallingConv::C:
2937 break;
2938 case CallingConv::X86_INTR: {
2939 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2940 "Calling convention parameter requires byval", &F);
2941 break;
2942 }
2943 case CallingConv::AMDGPU_KERNEL:
2944 case CallingConv::SPIR_KERNEL:
2945 case CallingConv::AMDGPU_CS_Chain:
2946 case CallingConv::AMDGPU_CS_ChainPreserve:
2947 Check(F.getReturnType()->isVoidTy(),
2948 "Calling convention requires void return type", &F);
2949 [[fallthrough]];
2950 case CallingConv::AMDGPU_VS:
2951 case CallingConv::AMDGPU_HS:
2952 case CallingConv::AMDGPU_GS:
2953 case CallingConv::AMDGPU_PS:
2954 case CallingConv::AMDGPU_CS:
2955 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2956 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2957 const unsigned StackAS = DL.getAllocaAddrSpace();
2958 unsigned i = 0;
2959 for (const Argument &Arg : F.args()) {
2960 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2961 "Calling convention disallows byval", &F);
2962 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2963 "Calling convention disallows preallocated", &F);
2964 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2965 "Calling convention disallows inalloca", &F);
2966
2967 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2968 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2969 // value here.
2970 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2971 "Calling convention disallows stack byref", &F);
2972 }
2973
2974 ++i;
2975 }
2976 }
2977
2978 [[fallthrough]];
2979 case CallingConv::Fast:
2980 case CallingConv::Cold:
2981 case CallingConv::Intel_OCL_BI:
2982 case CallingConv::PTX_Kernel:
2983 case CallingConv::PTX_Device:
2984 Check(!F.isVarArg(),
2985 "Calling convention does not support varargs or "
2986 "perfect forwarding!",
2987 &F);
2988 break;
2989 case CallingConv::AMDGPU_Gfx_WholeWave:
2990 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
2991 "Calling convention requires first argument to be i1", &F);
2992 Check(!F.arg_begin()->hasInRegAttr(),
2993 "Calling convention requires first argument to not be inreg", &F);
2994 Check(!F.isVarArg(),
2995 "Calling convention does not support varargs or "
2996 "perfect forwarding!",
2997 &F);
2998 break;
2999 }
3000
3001 // Check that the argument values match the function type for this function...
3002 unsigned i = 0;
3003 for (const Argument &Arg : F.args()) {
3004 Check(Arg.getType() == FT->getParamType(i),
3005 "Argument value does not match function argument type!", &Arg,
3006 FT->getParamType(i));
3007 Check(Arg.getType()->isFirstClassType(),
3008 "Function arguments must have first-class types!", &Arg);
3009 if (!IsIntrinsic) {
3010 Check(!Arg.getType()->isMetadataTy(),
3011 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3012 Check(!Arg.getType()->isTokenLikeTy(),
3013 "Function takes token but isn't an intrinsic", &Arg, &F);
3014 Check(!Arg.getType()->isX86_AMXTy(),
3015 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3016 }
3017
3018 // Check that swifterror argument is only used by loads and stores.
3019 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3020 verifySwiftErrorValue(&Arg);
3021 }
3022 ++i;
3023 }
3024
3025 if (!IsIntrinsic) {
3026 Check(!F.getReturnType()->isTokenLikeTy(),
3027 "Function returns a token but isn't an intrinsic", &F);
3028 Check(!F.getReturnType()->isX86_AMXTy(),
3029 "Function returns a x86_amx but isn't an intrinsic", &F);
3030 }
3031
3032 // Get the function metadata attachments.
3033 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
3034 F.getAllMetadata(MDs);
3035 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3036 verifyFunctionMetadata(MDs);
3037
3038 // Check validity of the personality function
3039 if (F.hasPersonalityFn()) {
3040 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3041 if (Per)
3042 Check(Per->getParent() == F.getParent(),
3043 "Referencing personality function in another module!", &F,
3044 F.getParent(), Per, Per->getParent());
3045 }
3046
3047 // EH funclet coloring can be expensive, recompute on-demand
3048 BlockEHFuncletColors.clear();
3049
3050 if (F.isMaterializable()) {
3051 // Function has a body somewhere we can't see.
3052 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3053 MDs.empty() ? nullptr : MDs.front().second);
3054 } else if (F.isDeclaration()) {
3055 for (const auto &I : MDs) {
3056 // This is used for call site debug information.
3057 CheckDI(I.first != LLVMContext::MD_dbg ||
3058 !cast<DISubprogram>(I.second)->isDistinct(),
3059 "function declaration may only have a unique !dbg attachment",
3060 &F);
3061 Check(I.first != LLVMContext::MD_prof,
3062 "function declaration may not have a !prof attachment", &F);
3063
3064 // Verify the metadata itself.
3065 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3066 }
3067 Check(!F.hasPersonalityFn(),
3068 "Function declaration shouldn't have a personality routine", &F);
3069 } else {
3070 // Verify that this function (which has a body) is not named "llvm.*". It
3071 // is not legal to define intrinsics.
3072 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3073
3074 // Check the entry node
3075 const BasicBlock *Entry = &F.getEntryBlock();
3076 Check(pred_empty(Entry),
3077 "Entry block to function must not have predecessors!", Entry);
3078
3079 // The address of the entry block cannot be taken, unless it is dead.
3080 if (Entry->hasAddressTaken()) {
3081 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3082 "blockaddress may not be used with the entry block!", Entry);
3083 }
3084
3085 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3086 NumKCFIAttachments = 0;
3087 // Visit metadata attachments.
3088 for (const auto &I : MDs) {
3089 // Verify that the attachment is legal.
3090 auto AllowLocs = AreDebugLocsAllowed::No;
3091 switch (I.first) {
3092 default:
3093 break;
3094 case LLVMContext::MD_dbg: {
3095 ++NumDebugAttachments;
3096 CheckDI(NumDebugAttachments == 1,
3097 "function must have a single !dbg attachment", &F, I.second);
3098 CheckDI(isa<DISubprogram>(I.second),
3099 "function !dbg attachment must be a subprogram", &F, I.second);
3100 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3101 "function definition may only have a distinct !dbg attachment",
3102 &F);
3103
3104 auto *SP = cast<DISubprogram>(I.second);
3105 const Function *&AttachedTo = DISubprogramAttachments[SP];
3106 CheckDI(!AttachedTo || AttachedTo == &F,
3107 "DISubprogram attached to more than one function", SP, &F);
3108 AttachedTo = &F;
3109 AllowLocs = AreDebugLocsAllowed::Yes;
3110 break;
3111 }
3112 case LLVMContext::MD_prof:
3113 ++NumProfAttachments;
3114 Check(NumProfAttachments == 1,
3115 "function must have a single !prof attachment", &F, I.second);
3116 break;
3117 case LLVMContext::MD_kcfi_type:
3118 ++NumKCFIAttachments;
3119 Check(NumKCFIAttachments == 1,
3120 "function must have a single !kcfi_type attachment", &F,
3121 I.second);
3122 break;
3123 }
3124
3125 // Verify the metadata itself.
3126 visitMDNode(*I.second, AllowLocs);
3127 }
3128 }
3129
3130 // If this function is actually an intrinsic, verify that it is only used in
3131 // direct call/invokes, never having its "address taken".
3132 // Only do this if the module is materialized, otherwise we don't have all the
3133 // uses.
3134 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3135 const User *U;
3136 if (F.hasAddressTaken(&U, false, true, false,
3137 /*IgnoreARCAttachedCall=*/true))
3138 Check(false, "Invalid user of intrinsic instruction!", U);
3139 }
3140
3141 // Check intrinsics' signatures.
3142 switch (F.getIntrinsicID()) {
3143 case Intrinsic::experimental_gc_get_pointer_base: {
3144 FunctionType *FT = F.getFunctionType();
3145 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3146 Check(isa<PointerType>(F.getReturnType()),
3147 "gc.get.pointer.base must return a pointer", F);
3148 Check(FT->getParamType(0) == F.getReturnType(),
3149 "gc.get.pointer.base operand and result must be of the same type", F);
3150 break;
3151 }
3152 case Intrinsic::experimental_gc_get_pointer_offset: {
3153 FunctionType *FT = F.getFunctionType();
3154 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3155 Check(isa<PointerType>(FT->getParamType(0)),
3156 "gc.get.pointer.offset operand must be a pointer", F);
3157 Check(F.getReturnType()->isIntegerTy(),
3158 "gc.get.pointer.offset must return integer", F);
3159 break;
3160 }
3161 }
3162
3163 auto *N = F.getSubprogram();
3164 HasDebugInfo = (N != nullptr);
3165 if (!HasDebugInfo)
3166 return;
3167
3168 // Check that all !dbg attachments lead back to N.
3169 //
3170 // FIXME: Check this incrementally while visiting !dbg attachments.
3171 // FIXME: Only check when N is the canonical subprogram for F.
3172 SmallPtrSet<const MDNode *, 32> Seen;
3173 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3174 // Be careful about using DILocation here since we might be dealing with
3175 // broken code (this is the Verifier after all).
3176 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3177 if (!DL)
3178 return;
3179 if (!Seen.insert(DL).second)
3180 return;
3181
3182 Metadata *Parent = DL->getRawScope();
3183 CheckDI(Parent && isa<DILocalScope>(Parent),
3184 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3185
3186 DILocalScope *Scope = DL->getInlinedAtScope();
3187 Check(Scope, "Failed to find DILocalScope", DL);
3188
3189 if (!Seen.insert(Scope).second)
3190 return;
3191
3192 DISubprogram *SP = Scope->getSubprogram();
3193
3194 // Scope and SP could be the same MDNode and we don't want to skip
3195 // validation in that case
3196 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
3197 return;
3198
3199 CheckDI(SP->describes(&F),
3200 "!dbg attachment points at wrong subprogram for function", N, &F,
3201 &I, DL, Scope, SP);
3202 };
3203 for (auto &BB : F)
3204 for (auto &I : BB) {
3205 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3206 // The llvm.loop annotations also contain two DILocations.
3207 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3208 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3209 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3210 if (BrokenDebugInfo)
3211 return;
3212 }
3213}
3214
3215// visitBasicBlock - Verify that a basic block is well formed...
3216//
3217void Verifier::visitBasicBlock(BasicBlock &BB) {
3218 InstsInThisBlock.clear();
3219 ConvergenceVerifyHelper.visit(BB);
3220
3221 // Ensure that basic blocks have terminators!
3222 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3223
3224 // Check constraints that this basic block imposes on all of the PHI nodes in
3225 // it.
3226 if (isa<PHINode>(BB.front())) {
3227 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3228 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3229 llvm::sort(Preds);
3230 for (const PHINode &PN : BB.phis()) {
3231 Check(PN.getNumIncomingValues() == Preds.size(),
3232 "PHINode should have one entry for each predecessor of its "
3233 "parent basic block!",
3234 &PN);
3235
3236 // Get and sort all incoming values in the PHI node...
3237 Values.clear();
3238 Values.reserve(PN.getNumIncomingValues());
3239 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3240 Values.push_back(
3241 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3242 llvm::sort(Values);
3243
3244 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3245 // Check to make sure that if there is more than one entry for a
3246 // particular basic block in this PHI node, that the incoming values are
3247 // all identical.
3248 //
3249 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3250 Values[i].second == Values[i - 1].second,
3251 "PHI node has multiple entries for the same basic block with "
3252 "different incoming values!",
3253 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3254
3255 // Check to make sure that the predecessors and PHI node entries are
3256 // matched up.
3257 Check(Values[i].first == Preds[i],
3258 "PHI node entries do not match predecessors!", &PN,
3259 Values[i].first, Preds[i]);
3260 }
3261 }
3262 }
3263
3264 // Check that all instructions have their parent pointers set up correctly.
3265 for (auto &I : BB)
3266 {
3267 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3268 }
3269
3270 // Confirm that no issues arise from the debug program.
3271 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3272 &BB);
3273}
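// A minimal textual-IR sketch of the PHI constraints enforced above (an
// illustrative addition, not part of the upstream file; %a, %b and the values
// are placeholders): for a block whose only predecessors are %a and %b,
//
//   %ok  = phi i32 [ 0, %a ], [ 1, %b ]   ; one entry per predecessor: accepted
//   %bad = phi i32 [ 0, %a ]              ; no entry for %b: rejected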
3274
3275void Verifier::visitTerminator(Instruction &I) {
3276 // Ensure that terminators only exist at the end of the basic block.
3277 Check(&I == I.getParent()->getTerminator(),
3278 "Terminator found in the middle of a basic block!", I.getParent());
3279 visitInstruction(I);
3280}
3281
3282void Verifier::visitBranchInst(BranchInst &BI) {
3283 if (BI.isConditional()) {
3285 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3286 }
3287 visitTerminator(BI);
3288}
3289
3290void Verifier::visitReturnInst(ReturnInst &RI) {
3291 Function *F = RI.getParent()->getParent();
3292 unsigned N = RI.getNumOperands();
3293 if (F->getReturnType()->isVoidTy())
3294 Check(N == 0,
3295 "Found return instr that returns non-void in Function of void "
3296 "return type!",
3297 &RI, F->getReturnType());
3298 else
3299 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3300 "Function return type does not match operand "
3301 "type of return inst!",
3302 &RI, F->getReturnType());
3303
3304 // Check to make sure that the return value has necessary properties for
3305 // terminators...
3306 visitTerminator(RI);
3307}
3308
3309void Verifier::visitSwitchInst(SwitchInst &SI) {
3310 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3311 // Check to make sure that all of the constants in the switch instruction
3312 // have the same type as the switched-on value.
3313 Type *SwitchTy = SI.getCondition()->getType();
3314 SmallPtrSet<ConstantInt *, 32> Constants;
3315 for (auto &Case : SI.cases()) {
3316 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3317 "Case value is not a constant integer.", &SI);
3318 Check(Case.getCaseValue()->getType() == SwitchTy,
3319 "Switch constants must all be same type as switch value!", &SI);
3320 Check(Constants.insert(Case.getCaseValue()).second,
3321 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3322 }
3323
3324 visitTerminator(SI);
3325}
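// Illustrative textual-IR sketch of the switch constraints above (not part of
// the upstream file; %x and the labels are placeholders): every case value
// must have the condition's type, and duplicate case values are rejected.
//
//   switch i32 %x, label %default [ i32 1, label %a
//                                   i32 1, label %b ]   ; rejected: duplicate case value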
3326
3327void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3329 "Indirectbr operand must have pointer type!", &BI);
3330 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3332 "Indirectbr destinations must all have pointer type!", &BI);
3333
3334 visitTerminator(BI);
3335}
3336
3337void Verifier::visitCallBrInst(CallBrInst &CBI) {
3338 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3339 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3340 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3341
3342 verifyInlineAsmCall(CBI);
3343 visitTerminator(CBI);
3344}
3345
3346void Verifier::visitSelectInst(SelectInst &SI) {
3347 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3348 SI.getOperand(2)),
3349 "Invalid operands for select instruction!", &SI);
3350
3351 Check(SI.getTrueValue()->getType() == SI.getType(),
3352 "Select values must have same type as select instruction!", &SI);
3353 visitInstruction(SI);
3354}
3355
3356/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3357/// a pass; if any exist, it's an error.
3358///
3359void Verifier::visitUserOp1(Instruction &I) {
3360 Check(false, "User-defined operators should not live outside of a pass!", &I);
3361}
3362
3363void Verifier::visitTruncInst(TruncInst &I) {
3364 // Get the source and destination types
3365 Type *SrcTy = I.getOperand(0)->getType();
3366 Type *DestTy = I.getType();
3367
3368 // Get the size of the types in bits, we'll need this later
3369 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3370 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3371
3372 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3373 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3374 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3375 "trunc source and destination must both be a vector or neither", &I);
3376 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3377
3378 visitInstruction(I);
3379}
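// Illustrative textual-IR sketch of the trunc rules above (not part of the
// upstream file): integer to integer, same scalar/vector shape, and strictly
// narrowing.
//
//   %a = trunc i64 %x to i32               ; accepted
//   %b = trunc <2 x i64> %y to <2 x i32>   ; accepted
//   %c = trunc i32 %z to i64               ; rejected: destination is wider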
3380
3381void Verifier::visitZExtInst(ZExtInst &I) {
3382 // Get the source and destination types
3383 Type *SrcTy = I.getOperand(0)->getType();
3384 Type *DestTy = I.getType();
3385
3386 // Get the size of the types in bits, we'll need this later
3387 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3388 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3389 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3390 "zext source and destination must both be a vector or neither", &I);
3391 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3392 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3393
3394 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3395
3396 visitInstruction(I);
3397}
3398
3399void Verifier::visitSExtInst(SExtInst &I) {
3400 // Get the source and destination types
3401 Type *SrcTy = I.getOperand(0)->getType();
3402 Type *DestTy = I.getType();
3403
3404 // Get the size of the types in bits, we'll need this later
3405 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3406 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3407
3408 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3409 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3410 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3411 "sext source and destination must both be a vector or neither", &I);
3412 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3413
3414 visitInstruction(I);
3415}
3416
3417void Verifier::visitFPTruncInst(FPTruncInst &I) {
3418 // Get the source and destination types
3419 Type *SrcTy = I.getOperand(0)->getType();
3420 Type *DestTy = I.getType();
3421 // Get the size of the types in bits, we'll need this later
3422 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3423 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3424
3425 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3426 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3427 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3428 "fptrunc source and destination must both be a vector or neither", &I);
3429 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3430
3431 visitInstruction(I);
3432}
3433
3434void Verifier::visitFPExtInst(FPExtInst &I) {
3435 // Get the source and destination types
3436 Type *SrcTy = I.getOperand(0)->getType();
3437 Type *DestTy = I.getType();
3438
3439 // Get the size of the types in bits, we'll need this later
3440 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3441 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3442
3443 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3444 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3445 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3446 "fpext source and destination must both be a vector or neither", &I);
3447 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3448
3449 visitInstruction(I);
3450}
3451
3452void Verifier::visitUIToFPInst(UIToFPInst &I) {
3453 // Get the source and destination types
3454 Type *SrcTy = I.getOperand(0)->getType();
3455 Type *DestTy = I.getType();
3456
3457 bool SrcVec = SrcTy->isVectorTy();
3458 bool DstVec = DestTy->isVectorTy();
3459
3460 Check(SrcVec == DstVec,
3461 "UIToFP source and dest must both be vector or scalar", &I);
3462 Check(SrcTy->isIntOrIntVectorTy(),
3463 "UIToFP source must be integer or integer vector", &I);
3464 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3465 &I);
3466
3467 if (SrcVec && DstVec)
3468 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3469 cast<VectorType>(DestTy)->getElementCount(),
3470 "UIToFP source and dest vector length mismatch", &I);
3471
3472 visitInstruction(I);
3473}
3474
3475void Verifier::visitSIToFPInst(SIToFPInst &I) {
3476 // Get the source and destination types
3477 Type *SrcTy = I.getOperand(0)->getType();
3478 Type *DestTy = I.getType();
3479
3480 bool SrcVec = SrcTy->isVectorTy();
3481 bool DstVec = DestTy->isVectorTy();
3482
3483 Check(SrcVec == DstVec,
3484 "SIToFP source and dest must both be vector or scalar", &I);
3485 Check(SrcTy->isIntOrIntVectorTy(),
3486 "SIToFP source must be integer or integer vector", &I);
3487 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3488 &I);
3489
3490 if (SrcVec && DstVec)
3491 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3492 cast<VectorType>(DestTy)->getElementCount(),
3493 "SIToFP source and dest vector length mismatch", &I);
3494
3495 visitInstruction(I);
3496}
3497
3498void Verifier::visitFPToUIInst(FPToUIInst &I) {
3499 // Get the source and destination types
3500 Type *SrcTy = I.getOperand(0)->getType();
3501 Type *DestTy = I.getType();
3502
3503 bool SrcVec = SrcTy->isVectorTy();
3504 bool DstVec = DestTy->isVectorTy();
3505
3506 Check(SrcVec == DstVec,
3507 "FPToUI source and dest must both be vector or scalar", &I);
3508 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3509 Check(DestTy->isIntOrIntVectorTy(),
3510 "FPToUI result must be integer or integer vector", &I);
3511
3512 if (SrcVec && DstVec)
3513 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3514 cast<VectorType>(DestTy)->getElementCount(),
3515 "FPToUI source and dest vector length mismatch", &I);
3516
3517 visitInstruction(I);
3518}
3519
3520void Verifier::visitFPToSIInst(FPToSIInst &I) {
3521 // Get the source and destination types
3522 Type *SrcTy = I.getOperand(0)->getType();
3523 Type *DestTy = I.getType();
3524
3525 bool SrcVec = SrcTy->isVectorTy();
3526 bool DstVec = DestTy->isVectorTy();
3527
3528 Check(SrcVec == DstVec,
3529 "FPToSI source and dest must both be vector or scalar", &I);
3530 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3531 Check(DestTy->isIntOrIntVectorTy(),
3532 "FPToSI result must be integer or integer vector", &I);
3533
3534 if (SrcVec && DstVec)
3535 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3536 cast<VectorType>(DestTy)->getElementCount(),
3537 "FPToSI source and dest vector length mismatch", &I);
3538
3539 visitInstruction(I);
3540}
3541
3542void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3543 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3544 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3545 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3546 V);
3547
3548 if (SrcTy->isVectorTy()) {
3549 auto *VSrc = cast<VectorType>(SrcTy);
3550 auto *VDest = cast<VectorType>(DestTy);
3551 Check(VSrc->getElementCount() == VDest->getElementCount(),
3552 "PtrToAddr vector length mismatch", V);
3553 }
3554
3555 Type *AddrTy = DL.getAddressType(SrcTy);
3556 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3557}
3558
3559void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3560 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3561 visitInstruction(I);
3562}
3563
3564void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3565 // Get the source and destination types
3566 Type *SrcTy = I.getOperand(0)->getType();
3567 Type *DestTy = I.getType();
3568
3569 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3570
3571 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3572 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3573 &I);
3574
3575 if (SrcTy->isVectorTy()) {
3576 auto *VSrc = cast<VectorType>(SrcTy);
3577 auto *VDest = cast<VectorType>(DestTy);
3578 Check(VSrc->getElementCount() == VDest->getElementCount(),
3579 "PtrToInt Vector length mismatch", &I);
3580 }
3581
3582 visitInstruction(I);
3583}
3584
3585void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3586 // Get the source and destination types
3587 Type *SrcTy = I.getOperand(0)->getType();
3588 Type *DestTy = I.getType();
3589
3590 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3591 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3592
3593 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3594 &I);
3595 if (SrcTy->isVectorTy()) {
3596 auto *VSrc = cast<VectorType>(SrcTy);
3597 auto *VDest = cast<VectorType>(DestTy);
3598 Check(VSrc->getElementCount() == VDest->getElementCount(),
3599 "IntToPtr Vector length mismatch", &I);
3600 }
3601 visitInstruction(I);
3602}
3603
3604void Verifier::visitBitCastInst(BitCastInst &I) {
3605 Check(
3606 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3607 "Invalid bitcast", &I);
3608 visitInstruction(I);
3609}
3610
3611void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3612 Type *SrcTy = I.getOperand(0)->getType();
3613 Type *DestTy = I.getType();
3614
3615 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3616 &I);
3617 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3618 &I);
3620 "AddrSpaceCast must be between different address spaces", &I);
3621 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3622 Check(SrcVTy->getElementCount() ==
3623 cast<VectorType>(DestTy)->getElementCount(),
3624 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3625 visitInstruction(I);
3626}
3627
3628/// visitPHINode - Ensure that a PHI node is well formed.
3629///
3630void Verifier::visitPHINode(PHINode &PN) {
3631 // Ensure that the PHI nodes are all grouped together at the top of the block.
3632 // This can be tested by checking whether the instruction before this is
3633 // either nonexistent (because this is begin()) or is a PHI node. If not,
3634 // then there is some other instruction before a PHI.
3635 Check(&PN == &PN.getParent()->front() ||
3636 isa<PHINode>(--BasicBlock::iterator(&PN)),
3637 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3638
3639 // Check that a PHI doesn't yield a Token.
3640 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3641
3642 // Check that all of the values of the PHI node have the same type as the
3643 // result.
3644 for (Value *IncValue : PN.incoming_values()) {
3645 Check(PN.getType() == IncValue->getType(),
3646 "PHI node operands are not the same type as the result!", &PN);
3647 }
3648
3649 // All other PHI node constraints are checked in the visitBasicBlock method.
3650
3651 visitInstruction(PN);
3652}
3653
3654void Verifier::visitCallBase(CallBase &Call) {
3655 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3656 "Called function must be a pointer!", Call);
3657 FunctionType *FTy = Call.getFunctionType();
3658
3659 // Verify that the correct number of arguments are being passed
3660 if (FTy->isVarArg())
3661 Check(Call.arg_size() >= FTy->getNumParams(),
3662 "Called function requires more parameters than were provided!", Call);
3663 else
3664 Check(Call.arg_size() == FTy->getNumParams(),
3665 "Incorrect number of arguments passed to called function!", Call);
3666
3667 // Verify that all arguments to the call match the function type.
3668 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3669 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3670 "Call parameter type does not match function signature!",
3671 Call.getArgOperand(i), FTy->getParamType(i), Call);
3672
3673 AttributeList Attrs = Call.getAttributes();
3674
3675 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3676 "Attribute after last parameter!", Call);
3677
3678 Function *Callee =
3679 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3680 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3681 if (IsIntrinsic)
3682 Check(Callee->getValueType() == FTy,
3683 "Intrinsic called with incompatible signature", Call);
3684
3685 // Verify that the calling convention used by the call site is callable.
3686 Check(isCallableCC(Call.getCallingConv()),
3687 "calling convention does not permit calls", Call);
3688
3689 // Disallow passing/returning values with alignment higher than we can
3690 // represent.
3691 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3692 // necessary.
3693 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3694 if (!Ty->isSized())
3695 return;
3696 Align ABIAlign = DL.getABITypeAlign(Ty);
3697 Check(ABIAlign.value() <= Value::MaximumAlignment,
3698 "Incorrect alignment of " + Message + " to called function!", Call);
3699 };
3700
3701 if (!IsIntrinsic) {
3702 VerifyTypeAlign(FTy->getReturnType(), "return type");
3703 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3704 Type *Ty = FTy->getParamType(i);
3705 VerifyTypeAlign(Ty, "argument passed");
3706 }
3707 }
3708
3709 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3710 // Don't allow speculatable on call sites, unless the underlying function
3711 // declaration is also speculatable.
3712 Check(Callee && Callee->isSpeculatable(),
3713 "speculatable attribute may not apply to call sites", Call);
3714 }
3715
3716 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3717 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3718 "preallocated as a call site attribute can only be on "
3719 "llvm.call.preallocated.arg");
3720 }
3721
3722 // Verify call attributes.
3723 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3724
3725 // Conservatively check the inalloca argument.
3726 // We have a bug if we can find that there is an underlying alloca without
3727 // inalloca.
3728 if (Call.hasInAllocaArgument()) {
3729 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3730 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3731 Check(AI->isUsedWithInAlloca(),
3732 "inalloca argument for call has mismatched alloca", AI, Call);
3733 }
3734
3735 // For each argument of the callsite, if it has the swifterror attribute,
3736 // make sure the underlying alloca or parameter it comes from has the
3737 // swifterror attribute as well.
3738 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3739 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3740 Value *SwiftErrorArg = Call.getArgOperand(i);
3741 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3742 Check(AI->isSwiftError(),
3743 "swifterror argument for call has mismatched alloca", AI, Call);
3744 continue;
3745 }
3746 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3747 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3748 SwiftErrorArg, Call);
3749 Check(ArgI->hasSwiftErrorAttr(),
3750 "swifterror argument for call has mismatched parameter", ArgI,
3751 Call);
3752 }
3753
3754 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3755 // Don't allow immarg on call sites, unless the underlying declaration
3756 // also has the matching immarg.
3757 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3758 "immarg may not apply only to call sites", Call.getArgOperand(i),
3759 Call);
3760 }
3761
3762 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3763 Value *ArgVal = Call.getArgOperand(i);
3764 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3765 "immarg operand has non-immediate parameter", ArgVal, Call);
3766
3767 // If the imm-arg is an integer and also has a range attached,
3768 // check if the given value is within the range.
3769 if (Call.paramHasAttr(i, Attribute::Range)) {
3770 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3771 const ConstantRange &CR =
3772 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3773 Check(CR.contains(CI->getValue()),
3774 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3775 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3776 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3777 Call);
3778 }
3779 }
3780 }
3781
3782 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3783 Value *ArgVal = Call.getArgOperand(i);
3784 bool hasOB =
3785 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3786 bool isMustTail = Call.isMustTailCall();
3787 Check(hasOB != isMustTail,
3788 "preallocated operand either requires a preallocated bundle or "
3789 "the call to be musttail (but not both)",
3790 ArgVal, Call);
3791 }
3792 }
3793
3794 if (FTy->isVarArg()) {
3795 // FIXME? is 'nest' even legal here?
3796 bool SawNest = false;
3797 bool SawReturned = false;
3798
3799 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3800 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3801 SawNest = true;
3802 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3803 SawReturned = true;
3804 }
3805
3806 // Check attributes on the varargs part.
3807 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3808 Type *Ty = Call.getArgOperand(Idx)->getType();
3809 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3810 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3811
3812 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3813 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3814 SawNest = true;
3815 }
3816
3817 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3818 Check(!SawReturned, "More than one parameter has attribute returned!",
3819 Call);
3820 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3821 "Incompatible argument and return types for 'returned' "
3822 "attribute",
3823 Call);
3824 SawReturned = true;
3825 }
3826
3827 // Statepoint intrinsic is vararg but the wrapped function may not be.
3828 // Allow sret here and check the wrapped function in verifyStatepoint.
3829 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3830 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3831 "Attribute 'sret' cannot be used for vararg call arguments!",
3832 Call);
3833
3834 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3835 Check(Idx == Call.arg_size() - 1,
3836 "inalloca isn't on the last argument!", Call);
3837 }
3838 }
3839
3840 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3841 if (!IsIntrinsic) {
3842 for (Type *ParamTy : FTy->params()) {
3843 Check(!ParamTy->isMetadataTy(),
3844 "Function has metadata parameter but isn't an intrinsic", Call);
3845 Check(!ParamTy->isTokenLikeTy(),
3846 "Function has token parameter but isn't an intrinsic", Call);
3847 }
3848 }
3849
3850 // Verify that indirect calls don't return tokens or x86_amx.
3851 if (!Call.getCalledFunction()) {
3852 Check(!FTy->getReturnType()->isTokenLikeTy(),
3853 "Return type cannot be token for indirect call!");
3854 Check(!FTy->getReturnType()->isX86_AMXTy(),
3855 "Return type cannot be x86_amx for indirect call!");
3856 }
3857
3858 if (Intrinsic::ID ID = Call.getIntrinsicID())
3859 visitIntrinsicCall(ID, Call);
3860
3861 // Verify that a callsite has at most one operand bundle of each kind checked
3862 // below: "deopt", "funclet", "gc-transition", "cfguardtarget", "preallocated",
3863 // "ptrauth", "kcfi", "gc-live", and "clang.arc.attachedcall".
3864 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3865 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3866 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3867 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3868 FoundAttachedCallBundle = false;
3869 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3870 OperandBundleUse BU = Call.getOperandBundleAt(i);
3871 uint32_t Tag = BU.getTagID();
3872 if (Tag == LLVMContext::OB_deopt) {
3873 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3874 FoundDeoptBundle = true;
3875 } else if (Tag == LLVMContext::OB_gc_transition) {
3876 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3877 Call);
3878 FoundGCTransitionBundle = true;
3879 } else if (Tag == LLVMContext::OB_funclet) {
3880 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3881 FoundFuncletBundle = true;
3882 Check(BU.Inputs.size() == 1,
3883 "Expected exactly one funclet bundle operand", Call);
3884 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3885 "Funclet bundle operands should correspond to a FuncletPadInst",
3886 Call);
3887 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3888 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3889 Call);
3890 FoundCFGuardTargetBundle = true;
3891 Check(BU.Inputs.size() == 1,
3892 "Expected exactly one cfguardtarget bundle operand", Call);
3893 } else if (Tag == LLVMContext::OB_ptrauth) {
3894 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3895 FoundPtrauthBundle = true;
3896 Check(BU.Inputs.size() == 2,
3897 "Expected exactly two ptrauth bundle operands", Call);
3898 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3899 BU.Inputs[0]->getType()->isIntegerTy(32),
3900 "Ptrauth bundle key operand must be an i32 constant", Call);
3901 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3902 "Ptrauth bundle discriminator operand must be an i64", Call);
3903 } else if (Tag == LLVMContext::OB_kcfi) {
3904 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3905 FoundKCFIBundle = true;
3906 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3907 Call);
3908 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3909 BU.Inputs[0]->getType()->isIntegerTy(32),
3910 "Kcfi bundle operand must be an i32 constant", Call);
3911 } else if (Tag == LLVMContext::OB_preallocated) {
3912 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3913 Call);
3914 FoundPreallocatedBundle = true;
3915 Check(BU.Inputs.size() == 1,
3916 "Expected exactly one preallocated bundle operand", Call);
3917 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3918 Check(Input &&
3919 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3920 "\"preallocated\" argument must be a token from "
3921 "llvm.call.preallocated.setup",
3922 Call);
3923 } else if (Tag == LLVMContext::OB_gc_live) {
3924 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3925 FoundGCLiveBundle = true;
3926 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3927 Check(!FoundAttachedCallBundle,
3928 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3929 FoundAttachedCallBundle = true;
3930 verifyAttachedCallBundle(Call, BU);
3931 }
3932 }
3933
3934 // Verify that callee and callsite agree on whether to use pointer auth.
3935 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3936 "Direct call cannot have a ptrauth bundle", Call);
3937
3938 // Verify that each inlinable callsite of a debug-info-bearing function in a
3939 // debug-info-bearing function has a debug location attached to it. Failure to
3940 // do so causes assertion failures when the inliner sets up inline scope info
3941 // (Interposable functions are not inlinable, neither are functions without
3942 // definitions.)
3943 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3944 !Call.getCalledFunction()->isInterposable() &&
3945 !Call.getCalledFunction()->isDeclaration() &&
3946 Call.getCalledFunction()->getSubprogram())
3947 CheckDI(Call.getDebugLoc(),
3948 "inlinable function call in a function with "
3949 "debug info must have a !dbg location",
3950 Call);
3951
3952 if (Call.isInlineAsm())
3953 verifyInlineAsmCall(Call);
3954
3955 ConvergenceVerifyHelper.visit(Call);
3956
3957 visitInstruction(Call);
3958}
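// Illustrative textual-IR sketch of the operand bundle rules above (not part
// of the upstream file; @f is a placeholder declaration): each checked bundle
// kind may appear at most once per call site.
//
//   call void @f() [ "deopt"(i32 0) ]           ; accepted
//   call void @f() [ "deopt"(), "deopt"() ]     ; rejected: multiple "deopt" bundles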
3959
3960void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3961 StringRef Context) {
3962 Check(!Attrs.contains(Attribute::InAlloca),
3963 Twine("inalloca attribute not allowed in ") + Context);
3964 Check(!Attrs.contains(Attribute::InReg),
3965 Twine("inreg attribute not allowed in ") + Context);
3966 Check(!Attrs.contains(Attribute::SwiftError),
3967 Twine("swifterror attribute not allowed in ") + Context);
3968 Check(!Attrs.contains(Attribute::Preallocated),
3969 Twine("preallocated attribute not allowed in ") + Context);
3970 Check(!Attrs.contains(Attribute::ByRef),
3971 Twine("byref attribute not allowed in ") + Context);
3972}
3973
3974/// Two types are "congruent" if they are identical, or if they are both pointer
3975/// types in the same address space.
3976static bool isTypeCongruent(Type *L, Type *R) {
3977 if (L == R)
3978 return true;
3979 PointerType *PL = dyn_cast<PointerType>(L);
3980 PointerType *PR = dyn_cast<PointerType>(R);
3981 if (!PL || !PR)
3982 return false;
3983 return PL->getAddressSpace() == PR->getAddressSpace();
3984}
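// Illustrative examples of the congruence rule above (not part of the
// upstream file):
//   i32 vs. i32                -> congruent (identical)
//   ptr vs. ptr                -> congruent (same address space)
//   ptr vs. ptr addrspace(1)   -> not congruent (address spaces differ)
//   i32 vs. i64                -> not congruent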
3985
3986static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3987 static const Attribute::AttrKind ABIAttrs[] = {
3988 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3989 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3990 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3991 Attribute::ByRef};
3992 AttrBuilder Copy(C);
3993 for (auto AK : ABIAttrs) {
3994 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3995 if (Attr.isValid())
3996 Copy.addAttribute(Attr);
3997 }
3998
3999 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4000 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4001 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4002 Attrs.hasParamAttr(I, Attribute::ByRef)))
4003 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4004 return Copy;
4005}
4006
4007void Verifier::verifyMustTailCall(CallInst &CI) {
4008 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4009
4010 Function *F = CI.getParent()->getParent();
4011 FunctionType *CallerTy = F->getFunctionType();
4012 FunctionType *CalleeTy = CI.getFunctionType();
4013 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4014 "cannot guarantee tail call due to mismatched varargs", &CI);
4015 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4016 "cannot guarantee tail call due to mismatched return types", &CI);
4017
4018 // - The calling conventions of the caller and callee must match.
4019 Check(F->getCallingConv() == CI.getCallingConv(),
4020 "cannot guarantee tail call due to mismatched calling conv", &CI);
4021
4022 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4023 // or a pointer bitcast followed by a ret instruction.
4024 // - The ret instruction must return the (possibly bitcasted) value
4025 // produced by the call or void.
4026 Value *RetVal = &CI;
4027 Instruction *Next = CI.getNextNode();
4028
4029 // Handle the optional bitcast.
4030 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4031 Check(BI->getOperand(0) == RetVal,
4032 "bitcast following musttail call must use the call", BI);
4033 RetVal = BI;
4034 Next = BI->getNextNode();
4035 }
4036
4037 // Check the return.
4038 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4039 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4040 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4041 isa<UndefValue>(Ret->getReturnValue()),
4042 "musttail call result must be returned", Ret);
4043
4044 AttributeList CallerAttrs = F->getAttributes();
4045 AttributeList CalleeAttrs = CI.getAttributes();
4046 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4047 CI.getCallingConv() == CallingConv::Tail) {
4048 StringRef CCName =
4049 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4050
4051 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4052 // are allowed in swifttailcc call
4053 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4054 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4055 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4056 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4057 }
4058 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4059 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4060 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4061 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4062 }
4063 // - Varargs functions are not allowed
4064 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4065 " tail call for varargs function");
4066 return;
4067 }
4068
4069 // - The caller and callee prototypes must match. Pointer types of
4070 // parameters or return types may differ in pointee type, but not
4071 // address space.
4072 if (!CI.getIntrinsicID()) {
4073 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4074 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4075 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4076 Check(
4077 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4078 "cannot guarantee tail call due to mismatched parameter types", &CI);
4079 }
4080 }
4081
4082 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4083 // returned, preallocated, and inalloca, must match.
4084 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4085 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4086 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4087 Check(CallerABIAttrs == CalleeABIAttrs,
4088 "cannot guarantee tail call due to mismatched ABI impacting "
4089 "function attributes",
4090 &CI, CI.getOperand(I));
4091 }
4092}
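// Illustrative textual-IR sketch of a call satisfying the musttail rules above
// (not part of the upstream file; assumes @callee is declared as i32 (i32)
// with the same calling convention as @caller):
//
//   define i32 @caller(i32 %x) {
//     %r = musttail call i32 @callee(i32 %x)
//     ret i32 %r
//   }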
4093
4094void Verifier::visitCallInst(CallInst &CI) {
4095 visitCallBase(CI);
4096
4097 if (CI.isMustTailCall())
4098 verifyMustTailCall(CI);
4099}
4100
4101void Verifier::visitInvokeInst(InvokeInst &II) {
4102 visitCallBase(II);
4103
4104 // Verify that the first non-PHI instruction of the unwind destination is an
4105 // exception handling instruction.
4106 Check(
4107 II.getUnwindDest()->isEHPad(),
4108 "The unwind destination does not have an exception handling instruction!",
4109 &II);
4110
4111 visitTerminator(II);
4112}
4113
4114/// visitUnaryOperator - Check the argument to the unary operator.
4115///
4116void Verifier::visitUnaryOperator(UnaryOperator &U) {
4117 Check(U.getType() == U.getOperand(0)->getType(),
4118 "Unary operators must have same type for"
4119 "operands and result!",
4120 &U);
4121
4122 switch (U.getOpcode()) {
4123 // Check that floating-point arithmetic operators are only used with
4124 // floating-point operands.
4125 case Instruction::FNeg:
4126 Check(U.getType()->isFPOrFPVectorTy(),
4127 "FNeg operator only works with float types!", &U);
4128 break;
4129 default:
4130 llvm_unreachable("Unknown UnaryOperator opcode!");
4131 }
4132
4133 visitInstruction(U);
4134}
4135
4136/// visitBinaryOperator - Check that both arguments to the binary operator are
4137/// of the same type!
4138///
4139void Verifier::visitBinaryOperator(BinaryOperator &B) {
4140 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4141 "Both operands to a binary operator are not of the same type!", &B);
4142
4143 switch (B.getOpcode()) {
4144 // Check that integer arithmetic operators are only used with
4145 // integral operands.
4146 case Instruction::Add:
4147 case Instruction::Sub:
4148 case Instruction::Mul:
4149 case Instruction::SDiv:
4150 case Instruction::UDiv:
4151 case Instruction::SRem:
4152 case Instruction::URem:
4153 Check(B.getType()->isIntOrIntVectorTy(),
4154 "Integer arithmetic operators only work with integral types!", &B);
4155 Check(B.getType() == B.getOperand(0)->getType(),
4156 "Integer arithmetic operators must have same type "
4157 "for operands and result!",
4158 &B);
4159 break;
4160 // Check that floating-point arithmetic operators are only used with
4161 // floating-point operands.
4162 case Instruction::FAdd:
4163 case Instruction::FSub:
4164 case Instruction::FMul:
4165 case Instruction::FDiv:
4166 case Instruction::FRem:
4167 Check(B.getType()->isFPOrFPVectorTy(),
4168 "Floating-point arithmetic operators only work with "
4169 "floating-point types!",
4170 &B);
4171 Check(B.getType() == B.getOperand(0)->getType(),
4172 "Floating-point arithmetic operators must have same type "
4173 "for operands and result!",
4174 &B);
4175 break;
4176 // Check that logical operators are only used with integral operands.
4177 case Instruction::And:
4178 case Instruction::Or:
4179 case Instruction::Xor:
4180 Check(B.getType()->isIntOrIntVectorTy(),
4181 "Logical operators only work with integral types!", &B);
4182 Check(B.getType() == B.getOperand(0)->getType(),
4183 "Logical operators must have same type for operands and result!", &B);
4184 break;
4185 case Instruction::Shl:
4186 case Instruction::LShr:
4187 case Instruction::AShr:
4188 Check(B.getType()->isIntOrIntVectorTy(),
4189 "Shifts only work with integral types!", &B);
4190 Check(B.getType() == B.getOperand(0)->getType(),
4191 "Shift return type must be same as operands!", &B);
4192 break;
4193 default:
4194 llvm_unreachable("Unknown BinaryOperator opcode!");
4195 }
4196
4197 visitInstruction(B);
4198}
4199
4200void Verifier::visitICmpInst(ICmpInst &IC) {
4201 // Check that the operands are the same type
4202 Type *Op0Ty = IC.getOperand(0)->getType();
4203 Type *Op1Ty = IC.getOperand(1)->getType();
4204 Check(Op0Ty == Op1Ty,
4205 "Both operands to ICmp instruction are not of the same type!", &IC);
4206 // Check that the operands are the right type
4207 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4208 "Invalid operand types for ICmp instruction", &IC);
4209 // Check that the predicate is valid.
4210 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4211
4212 visitInstruction(IC);
4213}
4214
4215void Verifier::visitFCmpInst(FCmpInst &FC) {
4216 // Check that the operands are the same type
4217 Type *Op0Ty = FC.getOperand(0)->getType();
4218 Type *Op1Ty = FC.getOperand(1)->getType();
4219 Check(Op0Ty == Op1Ty,
4220 "Both operands to FCmp instruction are not of the same type!", &FC);
4221 // Check that the operands are the right type
4222 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4223 &FC);
4224 // Check that the predicate is valid.
4225 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4226
4227 visitInstruction(FC);
4228}
4229
4230void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4232 "Invalid extractelement operands!", &EI);
4233 visitInstruction(EI);
4234}
4235
4236void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4237 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4238 IE.getOperand(2)),
4239 "Invalid insertelement operands!", &IE);
4240 visitInstruction(IE);
4241}
4242
4243void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4244 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4245 SV.getShuffleMask()),
4246 "Invalid shufflevector operands!", &SV);
4247 visitInstruction(SV);
4248}
4249
4250void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4251 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4252
4253 Check(isa<PointerType>(TargetTy),
4254 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4255 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4256
4257 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4258 Check(!STy->isScalableTy(),
4259 "getelementptr cannot target structure that contains scalable vector"
4260 "type",
4261 &GEP);
4262 }
4263
4264 SmallVector<Value *, 16> Idxs(GEP.indices());
4265 Check(
4266 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4267 "GEP indexes must be integers", &GEP);
4268 Type *ElTy =
4269 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4270 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4271
4272 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4273
4274 Check(PtrTy && GEP.getResultElementType() == ElTy,
4275 "GEP is not of right type for indices!", &GEP, ElTy);
4276
4277 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4278 // Additional checks for vector GEPs.
4279 ElementCount GEPWidth = GEPVTy->getElementCount();
4280 if (GEP.getPointerOperandType()->isVectorTy())
4281 Check(
4282 GEPWidth ==
4283 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4284 "Vector GEP result width doesn't match operand's", &GEP);
4285 for (Value *Idx : Idxs) {
4286 Type *IndexTy = Idx->getType();
4287 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4288 ElementCount IndexWidth = IndexVTy->getElementCount();
4289 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4290 }
4291 Check(IndexTy->isIntOrIntVectorTy(),
4292 "All GEP indices should be of integer type");
4293 }
4294 }
4295
4296 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4297 "GEP address space doesn't match type", &GEP);
4298
4299 visitInstruction(GEP);
4300}
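// Illustrative textual-IR sketch of the vector GEP width rule above (not part
// of the upstream file): the result width must match every vector operand.
//
//   %p = getelementptr i32, <4 x ptr> %base, <4 x i64> %idx   ; accepted
//   %q = getelementptr i32, <4 x ptr> %base, <2 x i64> %idx   ; rejected: width mismatch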
4301
4302static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4303 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4304}
4305
4306/// Verify !range and !absolute_symbol metadata. These have the same
4307/// restrictions, except !absolute_symbol allows the full set.
4308void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4309 Type *Ty, RangeLikeMetadataKind Kind) {
4310 unsigned NumOperands = Range->getNumOperands();
4311 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4312 unsigned NumRanges = NumOperands / 2;
4313 Check(NumRanges >= 1, "It should have at least one range!", Range);
4314
4315 ConstantRange LastRange(1, true); // Dummy initial value
4316 for (unsigned i = 0; i < NumRanges; ++i) {
4317 ConstantInt *Low =
4318 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4319 Check(Low, "The lower limit must be an integer!", Low);
4320 ConstantInt *High =
4321 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4322 Check(High, "The upper limit must be an integer!", High);
4323
4324 Check(High->getType() == Low->getType(), "Range pair types must match!",
4325 &I);
4326
4327 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4328 Check(High->getType()->isIntegerTy(32),
4329 "noalias.addrspace type must be i32!", &I);
4330 } else {
4331 Check(High->getType() == Ty->getScalarType(),
4332 "Range types must match instruction type!", &I);
4333 }
4334
4335 APInt HighV = High->getValue();
4336 APInt LowV = Low->getValue();
4337
4338 // ConstantRange asserts if the ranges are the same except for the min/max
4339 // value. Leave the cases it tolerates for the empty range error below.
4340 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4341 "The upper and lower limits cannot be the same value", &I);
4342
4343 ConstantRange CurRange(LowV, HighV);
4344 Check(!CurRange.isEmptySet() &&
4345 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4346 !CurRange.isFullSet()),
4347 "Range must not be empty!", Range);
4348 if (i != 0) {
4349 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4350 "Intervals are overlapping", Range);
4351 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4352 Range);
4353 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4354 Range);
4355 }
4356 LastRange = ConstantRange(LowV, HighV);
4357 }
4358 if (NumRanges > 2) {
4359 APInt FirstLow =
4360 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4361 APInt FirstHigh =
4362 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4363 ConstantRange FirstRange(FirstLow, FirstHigh);
4364 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4365 "Intervals are overlapping", Range);
4366 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4367 Range);
4368 }
4369}
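// Illustrative sketch of the !range shape accepted above (not part of the
// upstream file): an even number of [Low, High) pairs of the value's type,
// ordered, non-overlapping, and non-contiguous.
//
//   !0 = !{i8 0, i8 2, i8 4, i8 6}   ; accepted: [0,2) and [4,6)
//   !1 = !{i8 0, i8 2, i8 2, i8 4}   ; rejected: the intervals are contiguous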
4370
4371void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4372 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4373 "precondition violation");
4374 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4375}
4376
4377void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4378 Type *Ty) {
4379 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4380 "precondition violation");
4381 verifyRangeLikeMetadata(I, Range, Ty,
4382 RangeLikeMetadataKind::NoaliasAddrspace);
4383}
4384
4385void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4386 unsigned Size = DL.getTypeSizeInBits(Ty);
4387 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4388 Check(!(Size & (Size - 1)),
4389 "atomic memory access' operand must have a power-of-two size", Ty, I);
4390}
4391
4392void Verifier::visitLoadInst(LoadInst &LI) {
4393 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4394 Check(PTy, "Load operand must be a pointer.", &LI);
4395 Type *ElTy = LI.getType();
4396 if (MaybeAlign A = LI.getAlign()) {
4397 Check(A->value() <= Value::MaximumAlignment,
4398 "huge alignment values are unsupported", &LI);
4399 }
4400 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4401 if (LI.isAtomic()) {
4404 "Load cannot have Release ordering", &LI);
4405 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4406 "atomic load operand must have integer, pointer, or floating point "
4407 "type!",
4408 ElTy, &LI);
4409 checkAtomicMemAccessSize(ElTy, &LI);
4410 } else {
4412 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4413 }
4414
4415 visitInstruction(LI);
4416}
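// Illustrative textual-IR sketch of the atomic load rules above (not part of
// the upstream file): byte-sized power-of-two types only, and no release
// orderings on loads.
//
//   %v = load atomic i32, ptr %p acquire, align 4   ; accepted
//   %w = load atomic i32, ptr %p release, align 4   ; rejected: release load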
4417
4418void Verifier::visitStoreInst(StoreInst &SI) {
4419 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4420 Check(PTy, "Store operand must be a pointer.", &SI);
4421 Type *ElTy = SI.getOperand(0)->getType();
4422 if (MaybeAlign A = SI.getAlign()) {
4423 Check(A->value() <= Value::MaximumAlignment,
4424 "huge alignment values are unsupported", &SI);
4425 }
4426 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4427 if (SI.isAtomic()) {
4428 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4429 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4430 "Store cannot have Acquire ordering", &SI);
4431 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4432 "atomic store operand must have integer, pointer, or floating point "
4433 "type!",
4434 ElTy, &SI);
4435 checkAtomicMemAccessSize(ElTy, &SI);
4436 } else {
4437 Check(SI.getSyncScopeID() == SyncScope::System,
4438 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4439 }
4440 visitInstruction(SI);
4441}
4442
4443/// Check that SwiftErrorVal is used as a swifterror argument in Call.
4444void Verifier::verifySwiftErrorCall(CallBase &Call,
4445 const Value *SwiftErrorVal) {
4446 for (const auto &I : llvm::enumerate(Call.args())) {
4447 if (I.value() == SwiftErrorVal) {
4448 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4449 "swifterror value when used in a callsite should be marked "
4450 "with swifterror attribute",
4451 SwiftErrorVal, Call);
4452 }
4453 }
4454}
4455
4456void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4457 // Check that swifterror value is only used by loads, stores, or as
4458 // a swifterror argument.
4459 for (const User *U : SwiftErrorVal->users()) {
4460 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4461 isa<InvokeInst>(U),
4462 "swifterror value can only be loaded and stored from, or "
4463 "as a swifterror argument!",
4464 SwiftErrorVal, U);
4465 // If it is used by a store, check it is the second operand.
4466 if (auto StoreI = dyn_cast<StoreInst>(U))
4467 Check(StoreI->getOperand(1) == SwiftErrorVal,
4468 "swifterror value should be the second operand when used "
4469 "by stores",
4470 SwiftErrorVal, U);
4471 if (auto *Call = dyn_cast<CallBase>(U))
4472 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4473 }
4474}
4475
4476void Verifier::visitAllocaInst(AllocaInst &AI) {
4477 Type *Ty = AI.getAllocatedType();
4478 SmallPtrSet<Type*, 4> Visited;
4479 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4480 // Check if it's a target extension type that disallows being used on the
4481 // stack.
4483 "Alloca has illegal target extension type", &AI);
4485 "Alloca array size must have integer type", &AI);
4486 if (MaybeAlign A = AI.getAlign()) {
4487 Check(A->value() <= Value::MaximumAlignment,
4488 "huge alignment values are unsupported", &AI);
4489 }
4490
4491 if (AI.isSwiftError()) {
4492 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4494 "swifterror alloca must not be array allocation", &AI);
4495 verifySwiftErrorValue(&AI);
4496 }
4497
4498 if (TT.isAMDGPU()) {
4500 "alloca on amdgpu must be in addrspace(5)", &AI);
4501 }
4502
4503 visitInstruction(AI);
4504}
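// Illustrative textual-IR sketch of the swifterror alloca rules above (not
// part of the upstream file): a pointer-typed, non-array allocation.
//
//   %err = alloca swifterror ptr            ; accepted
//   %bad = alloca swifterror ptr, i32 4     ; rejected: array allocation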
4505
4506void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4507 Type *ElTy = CXI.getOperand(1)->getType();
4508 Check(ElTy->isIntOrPtrTy(),
4509 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4510 checkAtomicMemAccessSize(ElTy, &CXI);
4511 visitInstruction(CXI);
4512}
4513
4514void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4516 "atomicrmw instructions cannot be unordered.", &RMWI);
4517 auto Op = RMWI.getOperation();
4518 Type *ElTy = RMWI.getOperand(1)->getType();
4519 if (Op == AtomicRMWInst::Xchg) {
4520 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4521 ElTy->isPointerTy(),
4522 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4523 " operand must have integer or floating point type!",
4524 &RMWI, ElTy);
4525 } else if (AtomicRMWInst::isFPOperation(Op)) {
4526 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4527 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4528 " operand must have floating-point or fixed vector of floating-point "
4529 "type!",
4530 &RMWI, ElTy);
4531 } else {
4532 Check(ElTy->isIntegerTy(),
4533 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4534 " operand must have integer type!",
4535 &RMWI, ElTy);
4536 }
4537 checkAtomicMemAccessSize(ElTy, &RMWI);
4539 "Invalid binary operation!", &RMWI);
4540 visitInstruction(RMWI);
4541}
4542
4543void Verifier::visitFenceInst(FenceInst &FI) {
4544 const AtomicOrdering Ordering = FI.getOrdering();
4545 Check(Ordering == AtomicOrdering::Acquire ||
4546 Ordering == AtomicOrdering::Release ||
4547 Ordering == AtomicOrdering::AcquireRelease ||
4549 "fence instructions may only have acquire, release, acq_rel, or "
4550 "seq_cst ordering.",
4551 &FI);
4552 visitInstruction(FI);
4553}
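// Illustrative textual-IR sketch of the fence ordering rule above (not part of
// the upstream file):
//
//   fence seq_cst     ; accepted
//   fence acquire     ; accepted
//   fence monotonic   ; rejected: ordering too weak for a fence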
4554
4555void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4556 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4557 EVI.getIndices()) == EVI.getType(),
4558 "Invalid ExtractValueInst operands!", &EVI);
4559
4560 visitInstruction(EVI);
4561}
4562
4563void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4564 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4565 IVI.getIndices()) ==
4566 IVI.getOperand(1)->getType(),
4567 "Invalid InsertValueInst operands!", &IVI);
4568
4569 visitInstruction(IVI);
4570}
4571
4572static Value *getParentPad(Value *EHPad) {
4573 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4574 return FPI->getParentPad();
4575
4576 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4577}
4578
4579void Verifier::visitEHPadPredecessors(Instruction &I) {
4580 assert(I.isEHPad());
4581
4582 BasicBlock *BB = I.getParent();
4583 Function *F = BB->getParent();
4584
4585 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4586
4587 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4588 // The landingpad instruction defines its parent as a landing pad block. The
4589 // landing pad block may be branched to only by the unwind edge of an
4590 // invoke.
4591 for (BasicBlock *PredBB : predecessors(BB)) {
4592 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4593 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4594 "Block containing LandingPadInst must be jumped to "
4595 "only by the unwind edge of an invoke.",
4596 LPI);
4597 }
4598 return;
4599 }
4600 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4601 if (!pred_empty(BB))
4602 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4603 "Block containg CatchPadInst must be jumped to "
4604 "only by its catchswitch.",
4605 CPI);
4606 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4607 "Catchswitch cannot unwind to one of its catchpads",
4608 CPI->getCatchSwitch(), CPI);
4609 return;
4610 }
4611
4612 // Verify that each pred has a legal terminator with a legal to/from EH
4613 // pad relationship.
4614 Instruction *ToPad = &I;
4615 Value *ToPadParent = getParentPad(ToPad);
4616 for (BasicBlock *PredBB : predecessors(BB)) {
4617 Instruction *TI = PredBB->getTerminator();
4618 Value *FromPad;
4619 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4620 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4621 "EH pad must be jumped to via an unwind edge", ToPad, II);
4622 auto *CalledFn =
4623 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4624 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4625 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4626 continue;
4627 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4628 FromPad = Bundle->Inputs[0];
4629 else
4630 FromPad = ConstantTokenNone::get(II->getContext());
4631 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4632 FromPad = CRI->getOperand(0);
4633 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4634 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4635 FromPad = CSI;
4636 } else {
4637 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4638 }
4639
4640 // The edge may exit from zero or more nested pads.
4641 SmallSet<Value *, 8> Seen;
4642 for (;; FromPad = getParentPad(FromPad)) {
4643 Check(FromPad != ToPad,
4644 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4645 if (FromPad == ToPadParent) {
4646 // This is a legal unwind edge.
4647 break;
4648 }
4649 Check(!isa<ConstantTokenNone>(FromPad),
4650 "A single unwind edge may only enter one EH pad", TI);
4651 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4652 FromPad);
4653
4654 // This will be diagnosed on the corresponding instruction already. We
4655 // need the extra check here to make sure getParentPad() works.
4656 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4657 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4658 }
4659 }
4660}
4661
4662void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4663 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4664 // isn't a cleanup.
4665 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4666 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4667
4668 visitEHPadPredecessors(LPI);
4669
4670 if (!LandingPadResultTy)
4671 LandingPadResultTy = LPI.getType();
4672 else
4673 Check(LandingPadResultTy == LPI.getType(),
4674 "The landingpad instruction should have a consistent result type "
4675 "inside a function.",
4676 &LPI);
4677
4678 Function *F = LPI.getParent()->getParent();
4679 Check(F->hasPersonalityFn(),
4680 "LandingPadInst needs to be in a function with a personality.", &LPI);
4681
4682 // The landingpad instruction must be the first non-PHI instruction in the
4683 // block.
4684 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4685 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4686
4687 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4688 Constant *Clause = LPI.getClause(i);
4689 if (LPI.isCatch(i)) {
4690 Check(isa<PointerType>(Clause->getType()),
4691 "Catch operand does not have pointer type!", &LPI);
4692 } else {
4693 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4694 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4695 "Filter operand is not an array of constants!", &LPI);
4696 }
4697 }
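// An illustrative clause list these checks accept (typeinfo symbol arbitrary):
//   %lp = landingpad { ptr, i32 }
//           catch ptr @_ZTIi
//           filter [1 x ptr] [ptr @_ZTIi]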
4698
4699 visitInstruction(LPI);
4700}
4701
4702void Verifier::visitResumeInst(ResumeInst &RI) {
4704 "ResumeInst needs to be in a function with a personality.", &RI);
4705
4706 if (!LandingPadResultTy)
4707 LandingPadResultTy = RI.getValue()->getType();
4708 else
4709 Check(LandingPadResultTy == RI.getValue()->getType(),
4710 "The resume instruction should have a consistent result type "
4711 "inside a function.",
4712 &RI);
4713
4714 visitTerminator(RI);
4715}
4716
4717void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4718 BasicBlock *BB = CPI.getParent();
4719
4720 Function *F = BB->getParent();
4721 Check(F->hasPersonalityFn(),
4722 "CatchPadInst needs to be in a function with a personality.", &CPI);
4723
4724 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4725 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4726 CPI.getParentPad());
4727
4728 // The catchpad instruction must be the first non-PHI instruction in the
4729 // block.
4730 Check(&*BB->getFirstNonPHIIt() == &CPI,
4731 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4732
4733 visitEHPadPredecessors(CPI);
4734 visitFuncletPadInst(CPI);
4735}
4736
4737void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4738 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4739 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4740 CatchReturn.getOperand(0));
4741
4742 visitTerminator(CatchReturn);
4743}
4744
4745void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4746 BasicBlock *BB = CPI.getParent();
4747
4748 Function *F = BB->getParent();
4749 Check(F->hasPersonalityFn(),
4750 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4751
4752 // The cleanuppad instruction must be the first non-PHI instruction in the
4753 // block.
4754 Check(&*BB->getFirstNonPHIIt() == &CPI,
4755 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4756
4757 auto *ParentPad = CPI.getParentPad();
4758 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4759 "CleanupPadInst has an invalid parent.", &CPI);
4760
4761 visitEHPadPredecessors(CPI);
4762 visitFuncletPadInst(CPI);
4763}
4764
4765void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4766 User *FirstUser = nullptr;
4767 Value *FirstUnwindPad = nullptr;
4768 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4769 SmallSet<FuncletPadInst *, 8> Seen;
4770
4771 while (!Worklist.empty()) {
4772 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4773 Check(Seen.insert(CurrentPad).second,
4774 "FuncletPadInst must not be nested within itself", CurrentPad);
4775 Value *UnresolvedAncestorPad = nullptr;
4776 for (User *U : CurrentPad->users()) {
4777 BasicBlock *UnwindDest;
4778 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4779 UnwindDest = CRI->getUnwindDest();
4780 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4781 // We allow catchswitch unwind to caller to nest
4782 // within an outer pad that unwinds somewhere else,
4783 // because catchswitch doesn't have a nounwind variant.
4784 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4785 if (CSI->unwindsToCaller())
4786 continue;
4787 UnwindDest = CSI->getUnwindDest();
4788 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4789 UnwindDest = II->getUnwindDest();
4790 } else if (isa<CallInst>(U)) {
4791 // Calls which don't unwind may be found inside funclet
4792 // pads that unwind somewhere else. We don't *require*
4793 // such calls to be annotated nounwind.
4794 continue;
4795 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4796 // The unwind dest for a cleanup can only be found by
4797 // recursive search. Add it to the worklist, and we'll
4798 // search for its first use that determines where it unwinds.
4799 Worklist.push_back(CPI);
4800 continue;
4801 } else {
4802 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4803 continue;
4804 }
4805
4806 Value *UnwindPad;
4807 bool ExitsFPI;
4808 if (UnwindDest) {
4809 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
4810 if (!cast<Instruction>(UnwindPad)->isEHPad())
4811 continue;
4812 Value *UnwindParent = getParentPad(UnwindPad);
4813 // Ignore unwind edges that don't exit CurrentPad.
4814 if (UnwindParent == CurrentPad)
4815 continue;
4816 // Determine whether the original funclet pad is exited,
4817 // and if we are scanning nested pads determine how many
4818 // of them are exited so we can stop searching their
4819 // children.
4820 Value *ExitedPad = CurrentPad;
4821 ExitsFPI = false;
4822 do {
4823 if (ExitedPad == &FPI) {
4824 ExitsFPI = true;
4825 // Now we can resolve any ancestors of CurrentPad up to
4826 // FPI, but not including FPI since we need to make sure
4827 // to check all direct users of FPI for consistency.
4828 UnresolvedAncestorPad = &FPI;
4829 break;
4830 }
4831 Value *ExitedParent = getParentPad(ExitedPad);
4832 if (ExitedParent == UnwindParent) {
4833 // ExitedPad is the ancestor-most pad which this unwind
4834 // edge exits, so we can resolve up to it, meaning that
4835 // ExitedParent is the first ancestor still unresolved.
4836 UnresolvedAncestorPad = ExitedParent;
4837 break;
4838 }
4839 ExitedPad = ExitedParent;
4840 } while (!isa<ConstantTokenNone>(ExitedPad));
4841 } else {
4842 // Unwinding to caller exits all pads.
4843 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4844 ExitsFPI = true;
4845 UnresolvedAncestorPad = &FPI;
4846 }
4847
4848 if (ExitsFPI) {
4849 // This unwind edge exits FPI. Make sure it agrees with other
4850 // such edges.
4851 if (FirstUser) {
4852 Check(UnwindPad == FirstUnwindPad,
4853 "Unwind edges out of a funclet "
4854 "pad must have the same unwind "
4855 "dest",
4856 &FPI, U, FirstUser);
4857 } else {
4858 FirstUser = U;
4859 FirstUnwindPad = UnwindPad;
4860 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4861 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4862 getParentPad(UnwindPad) == getParentPad(&FPI))
4863 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4864 }
4865 }
4866 // Make sure we visit all uses of FPI, but for nested pads stop as
4867 // soon as we know where they unwind to.
4868 if (CurrentPad != &FPI)
4869 break;
4870 }
4871 if (UnresolvedAncestorPad) {
4872 if (CurrentPad == UnresolvedAncestorPad) {
4873 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4874 // we've found an unwind edge that exits it, because we need to verify
4875 // all direct uses of FPI.
4876 assert(CurrentPad == &FPI);
4877 continue;
4878 }
4879 // Pop off the worklist any nested pads that we've found an unwind
4880 // destination for. The pads on the worklist are the uncles,
4881 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4882 // for all ancestors of CurrentPad up to but not including
4883 // UnresolvedAncestorPad.
4884 Value *ResolvedPad = CurrentPad;
4885 while (!Worklist.empty()) {
4886 Value *UnclePad = Worklist.back();
4887 Value *AncestorPad = getParentPad(UnclePad);
4888 // Walk ResolvedPad up the ancestor list until we either find the
4889 // uncle's parent or the last resolved ancestor.
4890 while (ResolvedPad != AncestorPad) {
4891 Value *ResolvedParent = getParentPad(ResolvedPad);
4892 if (ResolvedParent == UnresolvedAncestorPad) {
4893 break;
4894 }
4895 ResolvedPad = ResolvedParent;
4896 }
4897 // If the resolved ancestor search didn't find the uncle's parent,
4898 // then the uncle is not yet resolved.
4899 if (ResolvedPad != AncestorPad)
4900 break;
4901 // This uncle is resolved, so pop it from the worklist.
4902 Worklist.pop_back();
4903 }
4904 }
4905 }
4906
4907 if (FirstUnwindPad) {
4908 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4909 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4910 Value *SwitchUnwindPad;
4911 if (SwitchUnwindDest)
4912 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
4913 else
4914 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4915 Check(SwitchUnwindPad == FirstUnwindPad,
4916 "Unwind edges out of a catch must have the same unwind dest as "
4917 "the parent catchswitch",
4918 &FPI, FirstUser, CatchSwitch);
4919 }
4920 }
4921
4922 visitInstruction(FPI);
4923}
4924
4925void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4926 BasicBlock *BB = CatchSwitch.getParent();
4927
4928 Function *F = BB->getParent();
4929 Check(F->hasPersonalityFn(),
4930 "CatchSwitchInst needs to be in a function with a personality.",
4931 &CatchSwitch);
4932
4933 // The catchswitch instruction must be the first non-PHI instruction in the
4934 // block.
4935 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
4936 "CatchSwitchInst not the first non-PHI instruction in the block.",
4937 &CatchSwitch);
4938
4939 auto *ParentPad = CatchSwitch.getParentPad();
4940 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4941 "CatchSwitchInst has an invalid parent.", ParentPad);
4942
4943 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4944 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4945 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4946 "CatchSwitchInst must unwind to an EH block which is not a "
4947 "landingpad.",
4948 &CatchSwitch);
4949
4950 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4951 if (getParentPad(&*I) == ParentPad)
4952 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4953 }
4954
4955 Check(CatchSwitch.getNumHandlers() != 0,
4956 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4957
4958 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4959 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
4960 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4961 }
4962
4963 visitEHPadPredecessors(CatchSwitch);
4964 visitTerminator(CatchSwitch);
4965}
4966
4967void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4968 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4969 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4970 CRI.getOperand(0));
4971
4972 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4973 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4974 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4975 "CleanupReturnInst must unwind to an EH block which is not a "
4976 "landingpad.",
4977 &CRI);
4978 }
4979
4980 visitTerminator(CRI);
4981}
4982
4983void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4984 Instruction *Op = cast<Instruction>(I.getOperand(i));
4985 // If we have an invalid invoke, don't try to compute the dominance.
4986 // We already reject it in the invoke specific checks and the dominance
4987 // computation doesn't handle multiple edges.
4988 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4989 if (II->getNormalDest() == II->getUnwindDest())
4990 return;
4991 }
4992
4993 // Quick check whether the def has already been encountered in the same block.
4994 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4995 // uses are defined to happen on the incoming edge, not at the instruction.
4996 //
4997 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4998 // wrapping an SSA value, assert that we've already encountered it. See
4999 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5000 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5001 return;
5002
5003 const Use &U = I.getOperandUse(i);
5004 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5005}
5006
5007void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5008 Check(I.getType()->isPointerTy(),
5009 "dereferenceable, dereferenceable_or_null "
5010 "apply only to pointer types",
5011 &I);
5012 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
5013 "dereferenceable, dereferenceable_or_null apply only to load"
5014 " and inttoptr instructions, use attributes for calls or invokes",
5015 &I);
5016 Check(MD->getNumOperands() == 1,
5017 "dereferenceable, dereferenceable_or_null "
5018 "take one operand!",
5019 &I);
5020 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5021 Check(CI && CI->getType()->isIntegerTy(64),
5022 "dereferenceable, "
5023 "dereferenceable_or_null metadata value must be an i64!",
5024 &I);
5025}
5026
5027void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5028 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5029 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5030 &I);
5031 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5032}
5033
5034void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5035 auto GetBranchingTerminatorNumOperands = [&]() {
5036 unsigned ExpectedNumOperands = 0;
5037 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5038 ExpectedNumOperands = BI->getNumSuccessors();
5039 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5040 ExpectedNumOperands = SI->getNumSuccessors();
5041 else if (isa<CallInst>(&I))
5042 ExpectedNumOperands = 1;
5043 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5044 ExpectedNumOperands = IBI->getNumDestinations();
5045 else if (isa<SelectInst>(&I))
5046 ExpectedNumOperands = 2;
5047 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5048 ExpectedNumOperands = CI->getNumSuccessors();
5049 return ExpectedNumOperands;
5050 };
5051 Check(MD->getNumOperands() >= 1,
5052 "!prof annotations should have at least 1 operand", MD);
5053 // Check first operand.
5054 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5055 Check(isa<MDString>(MD->getOperand(0)),
5056 "expected string with name of the !prof annotation", MD);
5057 MDString *MDS = cast<MDString>(MD->getOperand(0));
5058 StringRef ProfName = MDS->getString();
5059
5060 if (ProfName == MDProfLabels::UnknownBranchWeightsMarker) {
5061 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5062 "'unknown' !prof should only appear on instructions on which "
5063 "'branch_weights' would",
5064 MD);
5065 Check(MD->getNumOperands() == 1,
5066 "'unknown' !prof should have no additional operands", MD);
5067 return;
5068 }
5069
5070 Check(MD->getNumOperands() >= 2,
5071 "!prof annotations should have no less than 2 operands", MD);
5072
5073 // Check consistency of !prof branch_weights metadata.
5074 if (ProfName == MDProfLabels::BranchWeights) {
5075 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5076 if (isa<InvokeInst>(&I)) {
5077 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5078 "Wrong number of InvokeInst branch_weights operands", MD);
5079 } else {
5080 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5081 if (ExpectedNumOperands == 0)
5082 CheckFailed("!prof branch_weights are not allowed for this instruction",
5083 MD);
5084
5085 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5086 MD);
5087 }
5088 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5089 ++i) {
5090 auto &MDO = MD->getOperand(i);
5091 Check(MDO, "second operand should not be null", MD);
5092 Check(mdconst::dyn_extract<ConstantInt>(MDO),
5093 "!prof brunch_weights operand is not a const int");
5094 }
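// Illustrative attachment accepted here on a two-successor branch (weights
// arbitrary): !{!"branch_weights", i32 20, i32 10}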
5095 } else if (ProfName == MDProfLabels::ValueProfile) {
5096 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5097 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5098 Check(KindInt, "VP !prof missing kind argument", MD);
5099
5100 auto Kind = KindInt->getZExtValue();
5101 Check(Kind >= InstrProfValueKind::IPVK_First &&
5102 Kind <= InstrProfValueKind::IPVK_Last,
5103 "Invalid VP !prof kind", MD);
5104 Check(MD->getNumOperands() % 2 == 1,
5105 "VP !prof should have an even number "
5106 "of arguments after 'VP'",
5107 MD);
5108 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5109 Kind == InstrProfValueKind::IPVK_MemOPSize)
5110 Check(isa<CallBase>(I),
5111 "VP !prof indirect call or memop size expected to be applied to "
5112 "CallBase instructions only",
5113 MD);
5114 } else {
5115 CheckFailed("expected either branch_weights or VP profile name", MD);
5116 }
5117}
5118
5119void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5120 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5121 // DIAssignID metadata must be attached to either an alloca or some form of
5122 // store/memory-writing instruction.
5123 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5124 // possible store intrinsics.
5125 bool ExpectedInstTy =
5126 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<IntrinsicInst>(I);
5127 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5128 I, MD);
5129 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5130 // only be found as DbgAssignIntrinsic operands.
5131 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5132 for (auto *User : AsValue->users()) {
5133 CheckDI(isa<DbgAssignIntrinsic>(User),
5134 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5135 MD, User);
5136 // All of the dbg.assign intrinsics should be in the same function as I.
5137 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5138 CheckDI(DAI->getFunction() == I.getFunction(),
5139 "dbg.assign not in same function as inst", DAI, &I);
5140 }
5141 }
5142 for (DbgVariableRecord *DVR :
5143 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5144 CheckDI(DVR->isDbgAssign(),
5145 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5146 CheckDI(DVR->getFunction() == I.getFunction(),
5147 "DVRAssign not in same function as inst", DVR, &I);
5148 }
5149}
5150
5151void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5153 "!mmra metadata attached to unexpected instruction kind", I, MD);
5154
5155 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5156 // list of tags such as !2 in the following example:
5157 // !0 = !{!"a", !"b"}
5158 // !1 = !{!"c", !"d"}
5159 // !2 = !{!0, !1}
5160 if (MMRAMetadata::isTagMD(MD))
5161 return;
5162
5163 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5164 for (const MDOperand &MDOp : MD->operands())
5165 Check(MMRAMetadata::isTagMD(MDOp.get()),
5166 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5167}
5168
5169void Verifier::visitCallStackMetadata(MDNode *MD) {
5170 // Call stack metadata should consist of a list of at least 1 constant int
5171 // (representing a hash of the location).
5172 Check(MD->getNumOperands() >= 1,
5173 "call stack metadata should have at least 1 operand", MD);
5174
5175 for (const auto &Op : MD->operands())
5176 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
5177 "call stack metadata operand should be constant integer", Op);
5178}
5179
5180void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5181 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5182 Check(MD->getNumOperands() >= 1,
5183 "!memprof annotations should have at least 1 metadata operand "
5184 "(MemInfoBlock)",
5185 MD);
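// Illustrative shape of a !memprof attachment (values and tags arbitrary):
//   !0 = !{!1}               ; list of MemInfoBlocks
//   !1 = !{!2, !"notcold"}   ; MIB: call stack followed by MDString tags
//   !2 = !{i64 123, i64 456} ; call stack of constant integers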
5186
5187 // Check each MIB
5188 for (auto &MIBOp : MD->operands()) {
5189 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5190 // The first operand of an MIB should be the call stack metadata.
5191 // The rest of the operands should be MDString tags, and there should be
5192 // at least one.
5193 Check(MIB->getNumOperands() >= 2,
5194 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5195
5196 // Check call stack metadata (first operand).
5197 Check(MIB->getOperand(0) != nullptr,
5198 "!memprof MemInfoBlock first operand should not be null", MIB);
5199 Check(isa<MDNode>(MIB->getOperand(0)),
5200 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5201 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5202 visitCallStackMetadata(StackMD);
5203
5204 // The next set of 1 or more operands should be MDString.
5205 unsigned I = 1;
5206 for (; I < MIB->getNumOperands(); ++I) {
5207 if (!isa<MDString>(MIB->getOperand(I))) {
5208 Check(I > 1,
5209 "!memprof MemInfoBlock second operand should be an MDString",
5210 MIB);
5211 break;
5212 }
5213 }
5214
5215 // Any remaining should be MDNode that are pairs of integers
5216 for (; I < MIB->getNumOperands(); ++I) {
5217 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5218 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5219 MIB);
5220 Check(OpNode->getNumOperands() == 2,
5221 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5222 "operands",
5223 MIB);
5224 // Check that all of Op's operands are ConstantInt.
5225 Check(llvm::all_of(OpNode->operands(),
5226 [](const MDOperand &Op) {
5227 return mdconst::hasa<ConstantInt>(Op);
5228 }),
5229 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5230 "ConstantInt operands",
5231 MIB);
5232 }
5233 }
5234}
5235
5236void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5237 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5238 // Verify the partial callstack annotated from memprof profiles. This callsite
5239 // is a part of a profiled allocation callstack.
5240 visitCallStackMetadata(MD);
5241}
5242
5243static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5244 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5245 return isa<ConstantInt>(VAL->getValue());
5246 return false;
5247}
5248
5249void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5250 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5251 &I);
5252 for (Metadata *Op : MD->operands()) {
5253 Check(isa<MDNode>(Op),
5254 "The callee_type metadata must be a list of type metadata nodes", Op);
5255 auto *TypeMD = cast<MDNode>(Op);
5256 Check(TypeMD->getNumOperands() == 2,
5257 "Well-formed generalized type metadata must contain exactly two "
5258 "operands",
5259 Op);
5260 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5261 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5262 "The first operand of type metadata for functions must be zero", Op);
5263 Check(TypeMD->hasGeneralizedMDString(),
5264 "Only generalized type metadata can be part of the callee_type "
5265 "metadata list",
5266 Op);
5267 }
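// Illustrative attachment satisfying these checks (mangled name arbitrary):
//   !0 = !{!1}
//   !1 = !{i64 0, !"_ZTSFiiiE.generalized"}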
5268}
5269
5270void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5271 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5272 Check(Annotation->getNumOperands() >= 1,
5273 "annotation must have at least one operand");
5274 for (const MDOperand &Op : Annotation->operands()) {
5275 bool TupleOfStrings =
5276 isa<MDTuple>(Op.get()) &&
5277 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5278 return isa<MDString>(Annotation.get());
5279 });
5280 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5281 "operands must be a string or a tuple of strings");
5282 }
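// E.g. !{!"auto-init"} and !{!{!"key", !"value"}} are accepted shapes
// (strings arbitrary).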
5283}
5284
5285void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5286 unsigned NumOps = MD->getNumOperands();
5287 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5288 MD);
5289 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5290 "first scope operand must be self-referential or string", MD);
5291 if (NumOps == 3)
5292 Check(isa<MDString>(MD->getOperand(2)),
5293 "third scope operand must be string (if used)", MD);
5294
5295 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5296 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5297
5298 unsigned NumDomainOps = Domain->getNumOperands();
5299 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5300 "domain must have one or two operands", Domain);
5301 Check(Domain->getOperand(0).get() == Domain ||
5302 isa<MDString>(Domain->getOperand(0)),
5303 "first domain operand must be self-referential or string", Domain);
5304 if (NumDomainOps == 2)
5305 Check(isa<MDString>(Domain->getOperand(1)),
5306 "second domain operand must be string (if used)", Domain);
5307}
5308
5309void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5310 for (const MDOperand &Op : MD->operands()) {
5311 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5312 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5313 visitAliasScopeMetadata(OpMD);
5314 }
5315}
5316
5317void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5318 auto IsValidAccessScope = [](const MDNode *MD) {
5319 return MD->getNumOperands() == 0 && MD->isDistinct();
5320 };
5321
5322 // It must be either an access scope itself...
5323 if (IsValidAccessScope(MD))
5324 return;
5325
5326 // ...or a list of access scopes.
5327 for (const MDOperand &Op : MD->operands()) {
5328 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5329 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5330 Check(IsValidAccessScope(OpMD),
5331 "Access scope list contains invalid access scope", MD);
5332 }
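// E.g. "!0 = distinct !{}" is a single access scope, and "!1 = !{!0, !2}"
// (with !2 also a distinct empty node) is a list of access scopes.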
5333}
5334
5335/// verifyInstruction - Verify that an instruction is well formed.
5336///
5337void Verifier::visitInstruction(Instruction &I) {
5338 BasicBlock *BB = I.getParent();
5339 Check(BB, "Instruction not embedded in basic block!", &I);
5340
5341 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5342 for (User *U : I.users()) {
5343 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5344 "Only PHI nodes may reference their own value!", &I);
5345 }
5346 }
5347
5348 // Check that void typed values don't have names
5349 Check(!I.getType()->isVoidTy() || !I.hasName(),
5350 "Instruction has a name, but provides a void value!", &I);
5351
5352 // Check that the return value of the instruction is either void or a legal
5353 // value type.
5354 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5355 "Instruction returns a non-scalar type!", &I);
5356
5357 // Check that the instruction doesn't produce metadata. Calls are already
5358 // checked against the callee type.
5359 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5360 "Invalid use of metadata!", &I);
5361
5362 // Check that all uses of the instruction, if they are instructions
5363 // themselves, actually have parent basic blocks. If the use is not an
5364 // instruction, it is an error!
5365 for (Use &U : I.uses()) {
5366 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5367 Check(Used->getParent() != nullptr,
5368 "Instruction referencing"
5369 " instruction not embedded in a basic block!",
5370 &I, Used);
5371 else {
5372 CheckFailed("Use of instruction is not an instruction!", U);
5373 return;
5374 }
5375 }
5376
5377 // Get a pointer to the call base of the instruction if it is some form of
5378 // call.
5379 const CallBase *CBI = dyn_cast<CallBase>(&I);
5380
5381 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5382 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5383
5384 // Check to make sure that only first-class-values are operands to
5385 // instructions.
5386 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5387 Check(false, "Instruction operands must be first-class values!", &I);
5388 }
5389
5390 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5391 // This code checks whether the function is used as the operand of a
5392 // clang_arc_attachedcall operand bundle.
5393 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5394 int Idx) {
5395 return CBI && CBI->isOperandBundleOfType(
5396 LLVMContext::OB_clang_arc_attachedcall, Idx);
5397 };
5398
5399 // Check to make sure that the "address of" an intrinsic function is never
5400 // taken. Ignore cases where the address of the intrinsic function is used
5401 // as the argument of operand bundle "clang.arc.attachedcall" as those
5402 // cases are handled in verifyAttachedCallBundle.
5403 Check((!F->isIntrinsic() ||
5404 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5405 IsAttachedCallOperand(F, CBI, i)),
5406 "Cannot take the address of an intrinsic!", &I);
5407 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5408 F->getIntrinsicID() == Intrinsic::donothing ||
5409 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5410 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5411 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5412 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5413 F->getIntrinsicID() == Intrinsic::coro_resume ||
5414 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5415 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5416 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5417 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5418 F->getIntrinsicID() ==
5419 Intrinsic::experimental_patchpoint_void ||
5420 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5421 F->getIntrinsicID() == Intrinsic::fake_use ||
5422 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5423 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5424 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5425 IsAttachedCallOperand(F, CBI, i),
5426 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5427 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5428 "wasm.(re)throw",
5429 &I);
5430 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5431 &M, F, F->getParent());
5432 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5433 Check(OpBB->getParent() == BB->getParent(),
5434 "Referring to a basic block in another function!", &I);
5435 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5436 Check(OpArg->getParent() == BB->getParent(),
5437 "Referring to an argument in another function!", &I);
5438 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5439 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5440 &M, GV, GV->getParent());
5441 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5442 Check(OpInst->getFunction() == BB->getParent(),
5443 "Referring to an instruction in another function!", &I);
5444 verifyDominatesUse(I, i);
5445 } else if (isa<InlineAsm>(I.getOperand(i))) {
5446 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5447 "Cannot take the address of an inline asm!", &I);
5448 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5449 visitConstantExprsRecursively(CPA);
5450 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5451 if (CE->getType()->isPtrOrPtrVectorTy()) {
5452 // If we have a ConstantExpr pointer, we need to see if it came from an
5453 // illegal bitcast.
5454 visitConstantExprsRecursively(CE);
5455 }
5456 }
5457 }
5458
5459 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5460 Check(I.getType()->isFPOrFPVectorTy(),
5461 "fpmath requires a floating point result!", &I);
5462 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5463 if (ConstantFP *CFP0 =
5464 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5465 const APFloat &Accuracy = CFP0->getValueAPF();
5466 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5467 "fpmath accuracy must have float type", &I);
5468 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5469 "fpmath accuracy not a positive number!", &I);
5470 } else {
5471 Check(false, "invalid fpmath accuracy!", &I);
5472 }
5473 }
5474
5475 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5476 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5477 "Ranges are only for loads, calls and invokes!", &I);
5478 visitRangeMetadata(I, Range, I.getType());
5479 }
5480
5481 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5482 Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
5483 isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
5484 "noalias.addrspace are only for memory operations!", &I);
5485 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5486 }
5487
5488 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5489 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5490 "invariant.group metadata is only for loads and stores", &I);
5491 }
5492
5493 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5494 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5495 &I);
5496 Check(isa<LoadInst>(I),
5497 "nonnull applies only to load instructions, use attributes"
5498 " for calls or invokes",
5499 &I);
5500 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5501 }
5502
5503 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5504 visitDereferenceableMetadata(I, MD);
5505
5506 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5507 visitDereferenceableMetadata(I, MD);
5508
5509 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5510 visitNofreeMetadata(I, MD);
5511
5512 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5513 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5514
5515 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5516 visitAliasScopeListMetadata(MD);
5517 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5518 visitAliasScopeListMetadata(MD);
5519
5520 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5521 visitAccessGroupMetadata(MD);
5522
5523 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5524 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5525 &I);
5526 Check(isa<LoadInst>(I),
5527 "align applies only to load instructions, "
5528 "use attributes for calls or invokes",
5529 &I);
5530 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5531 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5532 Check(CI && CI->getType()->isIntegerTy(64),
5533 "align metadata value must be an i64!", &I);
5534 uint64_t Align = CI->getZExtValue();
5535 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5536 &I);
5538 "alignment is larger that implementation defined limit", &I);
5539 }
5540
5541 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5542 visitProfMetadata(I, MD);
5543
5544 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5545 visitMemProfMetadata(I, MD);
5546
5547 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5548 visitCallsiteMetadata(I, MD);
5549
5550 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5551 visitCalleeTypeMetadata(I, MD);
5552
5553 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5554 visitDIAssignIDMetadata(I, MD);
5555
5556 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5557 visitMMRAMetadata(I, MMRA);
5558
5559 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5560 visitAnnotationMetadata(Annotation);
5561
5562 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5563 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5564 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5565
5566 if (auto *DL = dyn_cast<DILocation>(N)) {
5567 if (DL->getAtomGroup()) {
5568 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5569 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5570 "Instructions enabled",
5571 DL, DL->getScope()->getSubprogram());
5572 }
5573 }
5574 }
5575
5576 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5577 I.getAllMetadata(MDs);
5578 for (auto Attachment : MDs) {
5579 unsigned Kind = Attachment.first;
5580 auto AllowLocs =
5581 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5582 ? AreDebugLocsAllowed::Yes
5583 : AreDebugLocsAllowed::No;
5584 visitMDNode(*Attachment.second, AllowLocs);
5585 }
5586
5587 InstsInThisBlock.insert(&I);
5588}
5589
5590/// Allow intrinsics to be verified in different ways.
5591void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5592 Function *IF = Call.getCalledFunction();
5593 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5594 IF);
5595
5596 // Verify that the intrinsic prototype lines up with what the .td files
5597 // describe.
5598 FunctionType *IFTy = IF->getFunctionType();
5599 bool IsVarArg = IFTy->isVarArg();
5600
5601 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5602 Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
5603 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5604
5605 // Walk the descriptors to extract overloaded types.
5610 "Intrinsic has incorrect return type!", IF);
5612 "Intrinsic has incorrect argument type!", IF);
5613
5614 // Verify if the intrinsic call matches the vararg property.
5615 if (IsVarArg)
5617 "Intrinsic was not defined with variable arguments!", IF);
5618 else
5620 "Callsite was not defined with variable arguments!", IF);
5621
5622 // All descriptors should be absorbed by now.
5623 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5624
5625 // Now that we have the intrinsic ID and the actual argument types (and we
5626 // know they are legal for the intrinsic!) get the intrinsic name through the
5627 // usual means. This allows us to verify the mangling of argument types into
5628 // the name.
5629 const std::string ExpectedName =
5630 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5631 Check(ExpectedName == IF->getName(),
5632 "Intrinsic name not mangled correctly for type arguments! "
5633 "Should be: " +
5634 ExpectedName,
5635 IF);
5636
5637 // If the intrinsic takes MDNode arguments, verify that they are either global
5638 // or are local to *this* function.
5639 for (Value *V : Call.args()) {
5640 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5641 visitMetadataAsValue(*MD, Call.getCaller());
5642 if (auto *Const = dyn_cast<Constant>(V))
5643 Check(!Const->getType()->isX86_AMXTy(),
5644 "const x86_amx is not allowed in argument!");
5645 }
5646
5647 switch (ID) {
5648 default:
5649 break;
5650 case Intrinsic::assume: {
5651 for (auto &Elem : Call.bundle_op_infos()) {
5652 unsigned ArgCount = Elem.End - Elem.Begin;
5653 // Separate storage assumptions are special insofar as they're the only
5654 // operand bundles allowed on assumes that aren't parameter attributes.
5655 if (Elem.Tag->getKey() == "separate_storage") {
5656 Check(ArgCount == 2,
5657 "separate_storage assumptions should have 2 arguments", Call);
5658 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5659 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5660 "arguments to separate_storage assumptions should be pointers",
5661 Call);
5662 continue;
5663 }
5664 Check(Elem.Tag->getKey() == "ignore" ||
5665 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5666 "tags must be valid attribute names", Call);
5667 Attribute::AttrKind Kind =
5668 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5669 if (Kind == Attribute::Alignment) {
5670 Check(ArgCount <= 3 && ArgCount >= 2,
5671 "alignment assumptions should have 2 or 3 arguments", Call);
5672 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5673 "first argument should be a pointer", Call);
5674 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5675 "second argument should be an integer", Call);
5676 if (ArgCount == 3)
5677 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5678 "third argument should be an integer if present", Call);
5679 continue;
5680 }
5681 if (Kind == Attribute::Dereferenceable) {
5682 Check(ArgCount == 2,
5683 "dereferenceable assumptions should have 2 arguments", Call);
5684 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5685 "first argument should be a pointer", Call);
5686 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5687 "second argument should be an integer", Call);
5688 continue;
5689 }
5690 Check(ArgCount <= 2, "too many arguments", Call);
5691 if (Kind == Attribute::None)
5692 break;
5693 if (Attribute::isIntAttrKind(Kind)) {
5694 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5695 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5696 "the second argument should be a constant integral value", Call);
5697 } else if (Attribute::canUseAsParamAttr(Kind)) {
5698 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5699 } else if (Attribute::canUseAsFnAttr(Kind)) {
5700 Check((ArgCount) == 0, "this attribute has no argument", Call);
5701 }
5702 }
5703 break;
5704 }
5705 case Intrinsic::ucmp:
5706 case Intrinsic::scmp: {
5707 Type *SrcTy = Call.getOperand(0)->getType();
5708 Type *DestTy = Call.getType();
5709
5710 Check(DestTy->getScalarSizeInBits() >= 2,
5711 "result type must be at least 2 bits wide", Call);
5712
5713 bool IsDestTypeVector = DestTy->isVectorTy();
5714 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5715 "ucmp/scmp argument and result types must both be either vector or "
5716 "scalar types",
5717 Call);
5718 if (IsDestTypeVector) {
5719 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5720 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5721 Check(SrcVecLen == DestVecLen,
5722 "return type and arguments must have the same number of "
5723 "elements",
5724 Call);
5725 }
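// E.g. %r = call i8 @llvm.scmp.i8.i32(i32 %a, i32 %b) satisfies these checks
// (value names arbitrary).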
5726 break;
5727 }
5728 case Intrinsic::coro_id: {
5729 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5730 if (isa<ConstantPointerNull>(InfoArg))
5731 break;
5732 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5733 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5734 "info argument of llvm.coro.id must refer to an initialized "
5735 "constant");
5736 Constant *Init = GV->getInitializer();
5737 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5738 "info argument of llvm.coro.id must refer to either a struct or "
5739 "an array");
5740 break;
5741 }
5742 case Intrinsic::is_fpclass: {
5743 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5744 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5745 "unsupported bits for llvm.is.fpclass test mask");
5746 break;
5747 }
5748 case Intrinsic::fptrunc_round: {
5749 // Check the rounding mode
5750 Metadata *MD = nullptr;
5751 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5752 if (MAV)
5753 MD = MAV->getMetadata();
5754
5755 Check(MD != nullptr, "missing rounding mode argument", Call);
5756
5757 Check(isa<MDString>(MD),
5758 ("invalid value for llvm.fptrunc.round metadata operand"
5759 " (the operand should be a string)"),
5760 MD);
5761
5762 std::optional<RoundingMode> RoundMode =
5763 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5764 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5765 "unsupported rounding mode argument", Call);
5766 break;
5767 }
5768#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5769#include "llvm/IR/VPIntrinsics.def"
5770#undef BEGIN_REGISTER_VP_INTRINSIC
5771 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5772 break;
5773#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5774 case Intrinsic::INTRINSIC:
5775#include "llvm/IR/ConstrainedOps.def"
5776#undef INSTRUCTION
5777 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5778 break;
5779 case Intrinsic::dbg_declare: // llvm.dbg.declare
5780 case Intrinsic::dbg_value: // llvm.dbg.value
5781 case Intrinsic::dbg_assign: // llvm.dbg.assign
5782 case Intrinsic::dbg_label: // llvm.dbg.label
5783 // We no longer interpret debug intrinsics (the old variable-location
5784 // design). They're meaningless as far as LLVM is concerned; we could make
5785 // it an error for them to appear, but it's possible we'll have users
5786 // converting back to intrinsics for the foreseeable future (such as DXIL),
5787 // so tolerate their existence.
5788 break;
5789 case Intrinsic::memcpy:
5790 case Intrinsic::memcpy_inline:
5791 case Intrinsic::memmove:
5792 case Intrinsic::memset:
5793 case Intrinsic::memset_inline:
5794 break;
5795 case Intrinsic::experimental_memset_pattern: {
5796 const auto Memset = cast<MemSetPatternInst>(&Call);
5797 Check(Memset->getValue()->getType()->isSized(),
5798 "unsized types cannot be used as memset patterns", Call);
5799 break;
5800 }
5801 case Intrinsic::memcpy_element_unordered_atomic:
5802 case Intrinsic::memmove_element_unordered_atomic:
5803 case Intrinsic::memset_element_unordered_atomic: {
5804 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5805
5806 ConstantInt *ElementSizeCI =
5807 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5808 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5809 Check(ElementSizeVal.isPowerOf2(),
5810 "element size of the element-wise atomic memory intrinsic "
5811 "must be a power of 2",
5812 Call);
5813
5814 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5815 return Alignment && ElementSizeVal.ule(Alignment->value());
5816 };
5817 Check(IsValidAlignment(AMI->getDestAlign()),
5818 "incorrect alignment of the destination argument", Call);
5819 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5820 Check(IsValidAlignment(AMT->getSourceAlign()),
5821 "incorrect alignment of the source argument", Call);
5822 }
5823 break;
5824 }
5825 case Intrinsic::call_preallocated_setup: {
5826 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5827 Check(NumArgs != nullptr,
5828 "llvm.call.preallocated.setup argument must be a constant");
5829 bool FoundCall = false;
5830 for (User *U : Call.users()) {
5831 auto *UseCall = dyn_cast<CallBase>(U);
5832 Check(UseCall != nullptr,
5833 "Uses of llvm.call.preallocated.setup must be calls");
5834 Intrinsic::ID IID = UseCall->getIntrinsicID();
5835 if (IID == Intrinsic::call_preallocated_arg) {
5836 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5837 Check(AllocArgIndex != nullptr,
5838 "llvm.call.preallocated.alloc arg index must be a constant");
5839 auto AllocArgIndexInt = AllocArgIndex->getValue();
5840 Check(AllocArgIndexInt.sge(0) &&
5841 AllocArgIndexInt.slt(NumArgs->getValue()),
5842 "llvm.call.preallocated.alloc arg index must be between 0 and "
5843 "corresponding "
5844 "llvm.call.preallocated.setup's argument count");
5845 } else if (IID == Intrinsic::call_preallocated_teardown) {
5846 // nothing to do
5847 } else {
5848 Check(!FoundCall, "Can have at most one call corresponding to a "
5849 "llvm.call.preallocated.setup");
5850 FoundCall = true;
5851 size_t NumPreallocatedArgs = 0;
5852 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5853 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5854 ++NumPreallocatedArgs;
5855 }
5856 }
5857 Check(NumPreallocatedArgs != 0,
5858 "cannot use preallocated intrinsics on a call without "
5859 "preallocated arguments");
5860 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5861 "llvm.call.preallocated.setup arg size must be equal to number "
5862 "of preallocated arguments "
5863 "at call site",
5864 Call, *UseCall);
5865 // getOperandBundle() cannot be called if more than one operand bundle of
5866 // the same kind exists. There is already a check elsewhere for this, so
5867 // skip here if we see more than one.
5868 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5869 1) {
5870 return;
5871 }
5872 auto PreallocatedBundle =
5873 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5874 Check(PreallocatedBundle,
5875 "Use of llvm.call.preallocated.setup outside intrinsics "
5876 "must be in \"preallocated\" operand bundle");
5877 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5878 "preallocated bundle must have token from corresponding "
5879 "llvm.call.preallocated.setup");
5880 }
5881 }
5882 break;
5883 }
5884 case Intrinsic::call_preallocated_arg: {
5885 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5886 Check(Token &&
5887 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5888 "llvm.call.preallocated.arg token argument must be a "
5889 "llvm.call.preallocated.setup");
5890 Check(Call.hasFnAttr(Attribute::Preallocated),
5891 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5892 "call site attribute");
5893 break;
5894 }
5895 case Intrinsic::call_preallocated_teardown: {
5896 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5897 Check(Token &&
5898 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5899 "llvm.call.preallocated.teardown token argument must be a "
5900 "llvm.call.preallocated.setup");
5901 break;
5902 }
5903 case Intrinsic::gcroot:
5904 case Intrinsic::gcwrite:
5905 case Intrinsic::gcread:
5906 if (ID == Intrinsic::gcroot) {
5907 AllocaInst *AI =
5908 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5909 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5910 Check(isa<Constant>(Call.getArgOperand(1)),
5911 "llvm.gcroot parameter #2 must be a constant.", Call);
5912 if (!AI->getAllocatedType()->isPointerTy()) {
5913 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5914 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5915 "or argument #2 must be a non-null constant.",
5916 Call);
5917 }
5918 }
5919
5920 Check(Call.getParent()->getParent()->hasGC(),
5921 "Enclosing function does not use GC.", Call);
5922 break;
5923 case Intrinsic::init_trampoline:
5924 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5925 "llvm.init_trampoline parameter #2 must resolve to a function.",
5926 Call);
5927 break;
5928 case Intrinsic::prefetch:
5929 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5930 "rw argument to llvm.prefetch must be 0-1", Call);
5931 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5932 "locality argument to llvm.prefetch must be 0-3", Call);
5933 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5934 "cache type argument to llvm.prefetch must be 0-1", Call);
5935 break;
5936 case Intrinsic::stackprotector:
5937 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5938 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5939 break;
5940 case Intrinsic::localescape: {
5941 BasicBlock *BB = Call.getParent();
5942 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5943 Call);
5944 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5945 Call);
5946 for (Value *Arg : Call.args()) {
5947 if (isa<ConstantPointerNull>(Arg))
5948 continue; // Null values are allowed as placeholders.
5949 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5950 Check(AI && AI->isStaticAlloca(),
5951 "llvm.localescape only accepts static allocas", Call);
5952 }
5953 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5954 SawFrameEscape = true;
5955 break;
5956 }
5957 case Intrinsic::localrecover: {
5958 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5959 Function *Fn = dyn_cast<Function>(FnArg);
5960 Check(Fn && !Fn->isDeclaration(),
5961 "llvm.localrecover first "
5962 "argument must be function defined in this module",
5963 Call);
5964 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5965 auto &Entry = FrameEscapeInfo[Fn];
5966 Entry.second = unsigned(
5967 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5968 break;
5969 }
5970
5971 case Intrinsic::experimental_gc_statepoint:
5972 if (auto *CI = dyn_cast<CallInst>(&Call))
5973 Check(!CI->isInlineAsm(),
5974 "gc.statepoint support for inline assembly unimplemented", CI);
5975 Check(Call.getParent()->getParent()->hasGC(),
5976 "Enclosing function does not use GC.", Call);
5977
5978 verifyStatepoint(Call);
5979 break;
5980 case Intrinsic::experimental_gc_result: {
5981 Check(Call.getParent()->getParent()->hasGC(),
5982 "Enclosing function does not use GC.", Call);
5983
5984 auto *Statepoint = Call.getArgOperand(0);
5985 if (isa<UndefValue>(Statepoint))
5986 break;
5987
5988 // Are we tied to a statepoint properly?
5989 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5990 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
5991 Intrinsic::experimental_gc_statepoint,
5992 "gc.result operand #1 must be from a statepoint", Call,
5993 Call.getArgOperand(0));
5994
5995 // Check that result type matches wrapped callee.
5996 auto *TargetFuncType =
5997 cast<FunctionType>(StatepointCall->getParamElementType(2));
5998 Check(Call.getType() == TargetFuncType->getReturnType(),
5999 "gc.result result type does not match wrapped callee", Call);
6000 break;
6001 }
6002 case Intrinsic::experimental_gc_relocate: {
6003 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6004
6005 Check(isa<PointerType>(Call.getType()->getScalarType()),
6006 "gc.relocate must return a pointer or a vector of pointers", Call);
6007
6008 // Check that this relocate is correctly tied to the statepoint
6009
6010 // This is case for relocate on the unwinding path of an invoke statepoint
6011 if (LandingPadInst *LandingPad =
6012 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
6013
6014 const BasicBlock *InvokeBB =
6015 LandingPad->getParent()->getUniquePredecessor();
6016
6017 // Landingpad relocates should have only one predecessor with invoke
6018 // statepoint terminator
6019 Check(InvokeBB, "safepoints should have unique landingpads",
6020 LandingPad->getParent());
6021 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6022 InvokeBB);
6023 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
6024 "gc relocate should be linked to a statepoint", InvokeBB);
6025 } else {
6026 // In all other cases relocate should be tied to the statepoint directly.
6027 // This covers relocates on a normal return path of invoke statepoint and
6028 // relocates of a call statepoint.
6029 auto *Token = Call.getArgOperand(0);
6030 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
6031 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6032 }
6033
6034 // Verify rest of the relocate arguments.
6035 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6036
6037 // Both the base and derived must be piped through the safepoint.
6038 Value *Base = Call.getArgOperand(1);
6039 Check(isa<ConstantInt>(Base),
6040 "gc.relocate operand #2 must be integer offset", Call);
6041
6042 Value *Derived = Call.getArgOperand(2);
6043 Check(isa<ConstantInt>(Derived),
6044 "gc.relocate operand #3 must be integer offset", Call);
6045
6046 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6047 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6048
6049 // Check the bounds
6050 if (isa<UndefValue>(StatepointCall))
6051 break;
6052 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6053 .getOperandBundle(LLVMContext::OB_gc_live)) {
6054 Check(BaseIndex < Opt->Inputs.size(),
6055 "gc.relocate: statepoint base index out of bounds", Call);
6056 Check(DerivedIndex < Opt->Inputs.size(),
6057 "gc.relocate: statepoint derived index out of bounds", Call);
6058 }
6059
6060 // Relocated value must be either a pointer type or vector-of-pointer type,
6061 // but gc_relocate does not need to return the same pointer type as the
6062 // relocated pointer. It can be casted to the correct type later if it's
6063 // desired. However, they must have the same address space and 'vectorness'
6064 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6065 auto *ResultType = Call.getType();
6066 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6067 auto *BaseType = Relocate.getBasePtr()->getType();
6068
6069 Check(BaseType->isPtrOrPtrVectorTy(),
6070 "gc.relocate: relocated value must be a pointer", Call);
6071 Check(DerivedType->isPtrOrPtrVectorTy(),
6072 "gc.relocate: relocated value must be a pointer", Call);
6073
6074 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6075 "gc.relocate: vector relocates to vector and pointer to pointer",
6076 Call);
6077 Check(
6078 ResultType->getPointerAddressSpace() ==
6079 DerivedType->getPointerAddressSpace(),
6080 "gc.relocate: relocating a pointer shouldn't change its address space",
6081 Call);
6082
6083 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6084 Check(GC, "gc.relocate: calling function must have GCStrategy",
6085 Call.getFunction());
6086 if (GC) {
6087 auto isGCPtr = [&GC](Type *PTy) {
6088 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6089 };
6090 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6091 Check(isGCPtr(BaseType),
6092 "gc.relocate: relocated value must be a gc pointer", Call);
6093 Check(isGCPtr(DerivedType),
6094 "gc.relocate: relocated value must be a gc pointer", Call);
6095 }
6096 break;
6097 }
6098 case Intrinsic::experimental_patchpoint: {
6099 if (Call.getCallingConv() == CallingConv::AnyReg) {
6100 Check(Call.getType()->isSingleValueType(),
6101 "patchpoint: invalid return type used with anyregcc", Call);
6102 }
6103 break;
6104 }
6105 case Intrinsic::eh_exceptioncode:
6106 case Intrinsic::eh_exceptionpointer: {
6107 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
6108 "eh.exceptionpointer argument must be a catchpad", Call);
6109 break;
6110 }
6111 case Intrinsic::get_active_lane_mask: {
6112 Check(Call.getType()->isVectorTy(),
6113 "get_active_lane_mask: must return a "
6114 "vector",
6115 Call);
6116 auto *ElemTy = Call.getType()->getScalarType();
6117 Check(ElemTy->isIntegerTy(1),
6118 "get_active_lane_mask: element type is not "
6119 "i1",
6120 Call);
6121 break;
6122 }
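// Illustrative IR (not from this file): a well-formed lane-mask query returning a vector of i1;
// %base and %n are placeholder operands.
//   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %base, i64 %n)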
6123 case Intrinsic::experimental_get_vector_length: {
6124 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6125 Check(!VF->isNegative() && !VF->isZero(),
6126 "get_vector_length: VF must be positive", Call);
6127 break;
6128 }
6129 case Intrinsic::masked_load: {
6130 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6131 Call);
6132
6133 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6134 Value *Mask = Call.getArgOperand(2);
6135 Value *PassThru = Call.getArgOperand(3);
6136 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6137 Call);
6138 Check(Alignment->getValue().isPowerOf2(),
6139 "masked_load: alignment must be a power of 2", Call);
6140 Check(PassThru->getType() == Call.getType(),
6141 "masked_load: pass through and return type must match", Call);
6142 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6143 cast<VectorType>(Call.getType())->getElementCount(),
6144 "masked_load: vector mask must be same length as return", Call);
6145 break;
6146 }
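// Illustrative IR (not from this file) passing the masked_load checks above: the alignment is a
// power of 2, the pass-through matches the return type, and the mask length matches the result;
// %p, %mask and %passthru are placeholders.
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %mask, <4 x i32> %passthru)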
6147 case Intrinsic::masked_store: {
6148 Value *Val = Call.getArgOperand(0);
6149 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6150 Value *Mask = Call.getArgOperand(3);
6151 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6152 Call);
6153 Check(Alignment->getValue().isPowerOf2(),
6154 "masked_store: alignment must be a power of 2", Call);
6155 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6156 cast<VectorType>(Val->getType())->getElementCount(),
6157 "masked_store: vector mask must be same length as value", Call);
6158 break;
6159 }
6160
6161 case Intrinsic::masked_gather: {
6162 const APInt &Alignment =
6163 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
6164 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6165 "masked_gather: alignment must be 0 or a power of 2", Call);
6166 break;
6167 }
6168 case Intrinsic::masked_scatter: {
6169 const APInt &Alignment =
6170 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6171 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6172 "masked_scatter: alignment must be 0 or a power of 2", Call);
6173 break;
6174 }
6175
6176 case Intrinsic::experimental_guard: {
6177 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6178 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6179 "experimental_guard must have exactly one "
6180 "\"deopt\" operand bundle");
6181 break;
6182 }
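// Illustrative IR (not from this file): llvm.experimental.guard must be a call (not an invoke)
// and carry exactly one "deopt" bundle; %cond is a placeholder.
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]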
6183
6184 case Intrinsic::experimental_deoptimize: {
6185 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6186 Call);
6187 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6188 "experimental_deoptimize must have exactly one "
6189 "\"deopt\" operand bundle");
6190 Check(Call.getType() == Call.getFunction()->getReturnType(),
6191 "experimental_deoptimize return type must match caller return type");
6192
6193 if (isa<CallInst>(Call)) {
6194 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
6195 Check(RI,
6196 "calls to experimental_deoptimize must be followed by a return");
6197
6198 if (!Call.getType()->isVoidTy() && RI)
6199 Check(RI->getReturnValue() == &Call,
6200 "calls to experimental_deoptimize must be followed by a return "
6201 "of the value computed by experimental_deoptimize");
6202 }
6203
6204 break;
6205 }
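// Illustrative IR (not from this file), in a caller returning i32: the deoptimize call must be
// immediately followed by a return of its result; %x is a placeholder deopt state value.
//   %r = call i32 (...) @llvm.experimental.deoptimize.i32() [ "deopt"(i32 %x) ]
//   ret i32 %r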
6206 case Intrinsic::vastart: {
6207 Check(Call.getFunction()->isVarArg(),
6208 "va_start called in a non-varargs function");
6209 break;
6210 }
6211 case Intrinsic::get_dynamic_area_offset: {
6212 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6213 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6214 IntTy->getBitWidth(),
6215 "get_dynamic_area_offset result type must be scalar integer matching "
6216 "alloca address space width",
6217 Call);
6218 break;
6219 }
6220 case Intrinsic::vector_reduce_and:
6221 case Intrinsic::vector_reduce_or:
6222 case Intrinsic::vector_reduce_xor:
6223 case Intrinsic::vector_reduce_add:
6224 case Intrinsic::vector_reduce_mul:
6225 case Intrinsic::vector_reduce_smax:
6226 case Intrinsic::vector_reduce_smin:
6227 case Intrinsic::vector_reduce_umax:
6228 case Intrinsic::vector_reduce_umin: {
6229 Type *ArgTy = Call.getArgOperand(0)->getType();
6230 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6231 "Intrinsic has incorrect argument type!");
6232 break;
6233 }
6234 case Intrinsic::vector_reduce_fmax:
6235 case Intrinsic::vector_reduce_fmin: {
6236 Type *ArgTy = Call.getArgOperand(0)->getType();
6237 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6238 "Intrinsic has incorrect argument type!");
6239 break;
6240 }
6241 case Intrinsic::vector_reduce_fadd:
6242 case Intrinsic::vector_reduce_fmul: {
6243 // Unlike the other reductions, the first argument is a start value. The
6244 // second argument is the vector to be reduced.
6245 Type *ArgTy = Call.getArgOperand(1)->getType();
6246 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6247 "Intrinsic has incorrect argument type!");
6248 break;
6249 }
6250 case Intrinsic::smul_fix:
6251 case Intrinsic::smul_fix_sat:
6252 case Intrinsic::umul_fix:
6253 case Intrinsic::umul_fix_sat:
6254 case Intrinsic::sdiv_fix:
6255 case Intrinsic::sdiv_fix_sat:
6256 case Intrinsic::udiv_fix:
6257 case Intrinsic::udiv_fix_sat: {
6258 Value *Op1 = Call.getArgOperand(0);
6259 Value *Op2 = Call.getArgOperand(1);
6261 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6262 "vector of ints");
6264 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6265 "vector of ints");
6266
6267 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6268 Check(Op3->getType()->isIntegerTy(),
6269 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6270 Check(Op3->getBitWidth() <= 32,
6271 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6272
6273 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6274 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6275 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6276 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6277 "the operands");
6278 } else {
6279 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6280 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6281 "to the width of the operands");
6282 }
6283 break;
6284 }
6285 case Intrinsic::lrint:
6286 case Intrinsic::llrint:
6287 case Intrinsic::lround:
6288 case Intrinsic::llround: {
6289 Type *ValTy = Call.getArgOperand(0)->getType();
6290 Type *ResultTy = Call.getType();
6291 auto *VTy = dyn_cast<VectorType>(ValTy);
6292 auto *RTy = dyn_cast<VectorType>(ResultTy);
6293 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6294 ExpectedName + ": argument must be floating-point or vector "
6295 "of floating-points, and result must be integer or "
6296 "vector of integers",
6297 &Call);
6298 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6299 ExpectedName + ": argument and result disagree on vector use", &Call);
6300 if (VTy) {
6301 Check(VTy->getElementCount() == RTy->getElementCount(),
6302 ExpectedName + ": argument must be same length as result", &Call);
6303 }
6304 break;
6305 }
6306 case Intrinsic::bswap: {
6307 Type *Ty = Call.getType();
6308 unsigned Size = Ty->getScalarSizeInBits();
6309 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6310 break;
6311 }
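// Illustrative IR (not from this file): i32 is a multiple of 16 bits, so this passes, whereas a
// bswap on i24 would be rejected by the check above; %x is a placeholder.
//   %r = call i32 @llvm.bswap.i32(i32 %x)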
6312 case Intrinsic::invariant_start: {
6313 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6314 Check(InvariantSize &&
6315 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6316 "invariant_start parameter must be -1, 0 or a positive number",
6317 &Call);
6318 break;
6319 }
6320 case Intrinsic::matrix_multiply:
6321 case Intrinsic::matrix_transpose:
6322 case Intrinsic::matrix_column_major_load:
6323 case Intrinsic::matrix_column_major_store: {
6324 Function *IF = Call.getCalledFunction();
6325 ConstantInt *Stride = nullptr;
6326 ConstantInt *NumRows;
6327 ConstantInt *NumColumns;
6328 VectorType *ResultTy;
6329 Type *Op0ElemTy = nullptr;
6330 Type *Op1ElemTy = nullptr;
6331 switch (ID) {
6332 case Intrinsic::matrix_multiply: {
6333 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6334 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6335 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6336 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6337 ->getNumElements() ==
6338 NumRows->getZExtValue() * N->getZExtValue(),
6339 "First argument of a matrix operation does not match specified "
6340 "shape!");
6341 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6342 ->getNumElements() ==
6343 N->getZExtValue() * NumColumns->getZExtValue(),
6344 "Second argument of a matrix operation does not match specified "
6345 "shape!");
6346
6347 ResultTy = cast<VectorType>(Call.getType());
6348 Op0ElemTy =
6349 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6350 Op1ElemTy =
6351 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6352 break;
6353 }
6354 case Intrinsic::matrix_transpose:
6355 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6356 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6357 ResultTy = cast<VectorType>(Call.getType());
6358 Op0ElemTy =
6359 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6360 break;
6361 case Intrinsic::matrix_column_major_load: {
6362 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
6363 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6364 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6365 ResultTy = cast<VectorType>(Call.getType());
6366 break;
6367 }
6368 case Intrinsic::matrix_column_major_store: {
6369 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
6370 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6371 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6372 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6373 Op0ElemTy =
6374 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6375 break;
6376 }
6377 default:
6378 llvm_unreachable("unexpected intrinsic");
6379 }
6380
6381 Check(ResultTy->getElementType()->isIntegerTy() ||
6382 ResultTy->getElementType()->isFloatingPointTy(),
6383 "Result type must be an integer or floating-point type!", IF);
6384
6385 if (Op0ElemTy)
6386 Check(ResultTy->getElementType() == Op0ElemTy,
6387 "Vector element type mismatch of the result and first operand "
6388 "vector!",
6389 IF);
6390
6391 if (Op1ElemTy)
6392 Check(ResultTy->getElementType() == Op1ElemTy,
6393 "Vector element type mismatch of the result and second operand "
6394 "vector!",
6395 IF);
6396
6397 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6398 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6399 "Result of a matrix operation does not fit in the returned vector!");
6400
6401 if (Stride)
6402 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6403 "Stride must be greater or equal than the number of rows!", IF);
6404
6405 break;
6406 }
6407 case Intrinsic::vector_splice: {
6408 VectorType *VecTy = cast<VectorType>(Call.getType());
6409 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6410 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6411 if (Call.getParent() && Call.getParent()->getParent()) {
6412 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6413 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6414 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6415 }
6416 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6417 (Idx >= 0 && Idx < KnownMinNumElements),
6418 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6419 "known minimum number of elements in the vector. For scalable "
6420 "vectors the minimum number of elements is determined from "
6421 "vscale_range.",
6422 &Call);
6423 break;
6424 }
6425 case Intrinsic::stepvector: {
6426 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6427 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6428 VecTy->getScalarSizeInBits() >= 8,
6429 "stepvector only supported for vectors of integers "
6430 "with a bitwidth of at least 8.",
6431 &Call);
6432 break;
6433 }
6434 case Intrinsic::experimental_vector_match: {
6435 Value *Op1 = Call.getArgOperand(0);
6436 Value *Op2 = Call.getArgOperand(1);
6437 Value *Mask = Call.getArgOperand(2);
6438
6439 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6440 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6441 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6442
6443 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6444 Check(isa<FixedVectorType>(Op2Ty),
6445 "Second operand must be a fixed length vector.", &Call);
6446 Check(Op1Ty->getElementType()->isIntegerTy(),
6447 "First operand must be a vector of integers.", &Call);
6448 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6449 "First two operands must have the same element type.", &Call);
6450 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6451 "First operand and mask must have the same number of elements.",
6452 &Call);
6453 Check(MaskTy->getElementType()->isIntegerTy(1),
6454 "Mask must be a vector of i1's.", &Call);
6455 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6456 &Call);
6457 break;
6458 }
6459 case Intrinsic::vector_insert: {
6460 Value *Vec = Call.getArgOperand(0);
6461 Value *SubVec = Call.getArgOperand(1);
6462 Value *Idx = Call.getArgOperand(2);
6463 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6464
6465 VectorType *VecTy = cast<VectorType>(Vec->getType());
6466 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6467
6468 ElementCount VecEC = VecTy->getElementCount();
6469 ElementCount SubVecEC = SubVecTy->getElementCount();
6470 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6471 "vector_insert parameters must have the same element "
6472 "type.",
6473 &Call);
6474 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6475 "vector_insert index must be a constant multiple of "
6476 "the subvector's known minimum vector length.");
6477
6478 // If this insertion is not the 'mixed' case where a fixed vector is
6479 // inserted into a scalable vector, ensure that the insertion of the
6480 // subvector does not overrun the parent vector.
6481 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6482 Check(IdxN < VecEC.getKnownMinValue() &&
6483 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6484 "subvector operand of vector_insert would overrun the "
6485 "vector being inserted into.");
6486 }
6487 break;
6488 }
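// Illustrative IR (not from this file): inserting a <4 x i32> subvector at index 4 into an
// <8 x i32> vector; the index is a multiple of the subvector length and the insert stays in
// bounds. %vec and %sub are placeholders.
//   %r = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %sub, i64 4)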
6489 case Intrinsic::vector_extract: {
6490 Value *Vec = Call.getArgOperand(0);
6491 Value *Idx = Call.getArgOperand(1);
6492 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6493
6494 VectorType *ResultTy = cast<VectorType>(Call.getType());
6495 VectorType *VecTy = cast<VectorType>(Vec->getType());
6496
6497 ElementCount VecEC = VecTy->getElementCount();
6498 ElementCount ResultEC = ResultTy->getElementCount();
6499
6500 Check(ResultTy->getElementType() == VecTy->getElementType(),
6501 "vector_extract result must have the same element "
6502 "type as the input vector.",
6503 &Call);
6504 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6505 "vector_extract index must be a constant multiple of "
6506 "the result type's known minimum vector length.");
6507
6508 // If this extraction is not the 'mixed' case where a fixed vector is
6509 // extracted from a scalable vector, ensure that the extraction does not
6510 // overrun the parent vector.
6511 if (VecEC.isScalable() == ResultEC.isScalable()) {
6512 Check(IdxN < VecEC.getKnownMinValue() &&
6513 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6514 "vector_extract would overrun.");
6515 }
6516 break;
6517 }
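// Illustrative IR (not from this file): extracting the high <4 x i32> half of an <8 x i32>
// vector; index 4 is a multiple of the result length and the extract stays in bounds.
//   %hi = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4)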
6518 case Intrinsic::experimental_vector_partial_reduce_add: {
6519 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6520 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6521
6522 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6523 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6524
6525 Check((VecWidth % AccWidth) == 0,
6526 "Invalid vector widths for partial "
6527 "reduction. The width of the input vector "
6528 "must be a positive integer multiple of "
6529 "the width of the accumulator vector.");
6530 break;
6531 }
6532 case Intrinsic::experimental_noalias_scope_decl: {
6533 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6534 break;
6535 }
6536 case Intrinsic::preserve_array_access_index:
6537 case Intrinsic::preserve_struct_access_index:
6538 case Intrinsic::aarch64_ldaxr:
6539 case Intrinsic::aarch64_ldxr:
6540 case Intrinsic::arm_ldaex:
6541 case Intrinsic::arm_ldrex: {
6542 Type *ElemTy = Call.getParamElementType(0);
6543 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6544 &Call);
6545 break;
6546 }
6547 case Intrinsic::aarch64_stlxr:
6548 case Intrinsic::aarch64_stxr:
6549 case Intrinsic::arm_stlex:
6550 case Intrinsic::arm_strex: {
6551 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6552 Check(ElemTy,
6553 "Intrinsic requires elementtype attribute on second argument.",
6554 &Call);
6555 break;
6556 }
6557 case Intrinsic::aarch64_prefetch: {
6558 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6559 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6560 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6561 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6562 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6563 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6564 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6565 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6566 break;
6567 }
6568 case Intrinsic::callbr_landingpad: {
6569 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6570 Check(CBR, "intrinstic requires callbr operand", &Call);
6571 if (!CBR)
6572 break;
6573
6574 const BasicBlock *LandingPadBB = Call.getParent();
6575 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6576 if (!PredBB) {
6577 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6578 break;
6579 }
6580 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6581 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6582 &Call);
6583 break;
6584 }
6585 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6586 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6587 "block in indirect destination list",
6588 &Call);
6589 const Instruction &First = *LandingPadBB->begin();
6590 Check(&First == &Call, "No other instructions may proceed intrinsic",
6591 &Call);
6592 break;
6593 }
6594 case Intrinsic::amdgcn_cs_chain: {
6595 auto CallerCC = Call.getCaller()->getCallingConv();
6596 switch (CallerCC) {
6597 case CallingConv::AMDGPU_CS:
6598 case CallingConv::AMDGPU_CS_Chain:
6599 case CallingConv::AMDGPU_CS_ChainPreserve:
6600 break;
6601 default:
6602 CheckFailed("Intrinsic can only be used from functions with the "
6603 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6604 "calling conventions",
6605 &Call);
6606 break;
6607 }
6608
6609 Check(Call.paramHasAttr(2, Attribute::InReg),
6610 "SGPR arguments must have the `inreg` attribute", &Call);
6611 Check(!Call.paramHasAttr(3, Attribute::InReg),
6612 "VGPR arguments must not have the `inreg` attribute", &Call);
6613
6614 auto *Next = Call.getNextNode();
6615 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6616 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6617 Intrinsic::amdgcn_unreachable;
6618 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6619 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6620 break;
6621 }
6622 case Intrinsic::amdgcn_init_exec_from_input: {
6623 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6624 Check(Arg && Arg->hasInRegAttr(),
6625 "only inreg arguments to the parent function are valid as inputs to "
6626 "this intrinsic",
6627 &Call);
6628 break;
6629 }
6630 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6631 auto CallerCC = Call.getCaller()->getCallingConv();
6632 switch (CallerCC) {
6633 case CallingConv::AMDGPU_CS_Chain:
6634 case CallingConv::AMDGPU_CS_ChainPreserve:
6635 break;
6636 default:
6637 CheckFailed("Intrinsic can only be used from functions with the "
6638 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6639 "calling conventions",
6640 &Call);
6641 break;
6642 }
6643
6644 unsigned InactiveIdx = 1;
6645 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6646 "Value for inactive lanes must not have the `inreg` attribute",
6647 &Call);
6648 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6649 "Value for inactive lanes must be a function argument", &Call);
6650 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6651 "Value for inactive lanes must be a VGPR function argument", &Call);
6652 break;
6653 }
6654 case Intrinsic::amdgcn_call_whole_wave: {
6655 auto F = dyn_cast<Function>(Call.getArgOperand(0));
6656 Check(F, "Indirect whole wave calls are not allowed", &Call);
6657
6658 CallingConv::ID CC = F->getCallingConv();
6660 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6661 &Call);
6662
6663 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6664
6665 Check(Call.arg_size() == F->arg_size(),
6666 "Call argument count must match callee argument count", &Call);
6667
6668 // The first argument of the call is the callee, and the first argument of
6669 // the callee is the active mask. The rest of the arguments must match.
6670 Check(F->arg_begin()->getType()->isIntegerTy(1),
6671 "Callee must have i1 as its first argument", &Call);
6672 for (auto [CallArg, FuncArg] :
6673 drop_begin(zip_equal(Call.args(), F->args()))) {
6674 Check(CallArg->getType() == FuncArg.getType(),
6675 "Argument types must match", &Call);
6676
6677 // Check that inreg attributes match between call site and function
6678 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6679 FuncArg.hasInRegAttr(),
6680 "Argument inreg attributes must match", &Call);
6681 }
6682 break;
6683 }
6684 case Intrinsic::amdgcn_s_prefetch_data: {
6685 Check(
6686 AMDGPU::isFlatGlobalAddrSpace(
6687 Call.getArgOperand(0)->getType()->getPointerAddressSpace()),
6688 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6689 break;
6690 }
6691 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6692 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6693 Value *Src0 = Call.getArgOperand(0);
6694 Value *Src1 = Call.getArgOperand(1);
6695
6696 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6697 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6698 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6699 Call.getArgOperand(3));
6700 Check(BLGP <= 4, "invalid value for blgp format", Call,
6701 Call.getArgOperand(4));
6702
6703 // AMDGPU::MFMAScaleFormats values
6704 auto getFormatNumRegs = [](unsigned FormatVal) {
6705 switch (FormatVal) {
6706 case 0:
6707 case 1:
6708 return 8u;
6709 case 2:
6710 case 3:
6711 return 6u;
6712 case 4:
6713 return 4u;
6714 default:
6715 llvm_unreachable("invalid format value");
6716 }
6717 };
6718
6719 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6720 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6721 return false;
6722 unsigned NumElts = Ty->getNumElements();
6723 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6724 };
6725
6726 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6727 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6728 Check(isValidSrcASrcBVector(Src0Ty),
6729 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6730 Check(isValidSrcASrcBVector(Src1Ty),
6731 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6732
6733 // Permit excess registers for the format.
6734 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6735 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6736 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6737 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6738 break;
6739 }
6740 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6741 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6742 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6743 Value *Src0 = Call.getArgOperand(1);
6744 Value *Src1 = Call.getArgOperand(3);
6745
6746 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6747 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6748 Check(FmtA <= 4, "invalid value for matrix format", Call,
6749 Call.getArgOperand(0));
6750 Check(FmtB <= 4, "invalid value for matrix format", Call,
6751 Call.getArgOperand(2));
6752
6753 // AMDGPU::MatrixFMT values
6754 auto getFormatNumRegs = [](unsigned FormatVal) {
6755 switch (FormatVal) {
6756 case 0:
6757 case 1:
6758 return 16u;
6759 case 2:
6760 case 3:
6761 return 12u;
6762 case 4:
6763 return 8u;
6764 default:
6765 llvm_unreachable("invalid format value");
6766 }
6767 };
6768
6769 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6770 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6771 return false;
6772 unsigned NumElts = Ty->getNumElements();
6773 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6774 };
6775
6776 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6777 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6778 Check(isValidSrcASrcBVector(Src0Ty),
6779 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6780 Check(isValidSrcASrcBVector(Src1Ty),
6781 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6782
6783 // Permit excess registers for the format.
6784 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6785 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6786 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6787 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6788 break;
6789 }
6790 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6791 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6792 Value *V = Call.getArgOperand(0);
6793 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6794 Check(RegCount % 8 == 0,
6795 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6796 break;
6797 }
6798 case Intrinsic::experimental_convergence_entry:
6799 case Intrinsic::experimental_convergence_anchor:
6800 break;
6801 case Intrinsic::experimental_convergence_loop:
6802 break;
6803 case Intrinsic::ptrmask: {
6804 Type *Ty0 = Call.getArgOperand(0)->getType();
6805 Type *Ty1 = Call.getArgOperand(1)->getType();
6807 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6808 "of pointers",
6809 &Call);
6810 Check(
6811 Ty0->isVectorTy() == Ty1->isVectorTy(),
6812 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6813 &Call);
6814 if (Ty0->isVectorTy())
6815 Check(cast<VectorType>(Ty0)->getElementCount() ==
6816 cast<VectorType>(Ty1)->getElementCount(),
6817 "llvm.ptrmask intrinsic arguments must have the same number of "
6818 "elements",
6819 &Call);
6820 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6821 "llvm.ptrmask intrinsic second argument bitwidth must match "
6822 "pointer index type size of first argument",
6823 &Call);
6824 break;
6825 }
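// Illustrative IR (not from this file), assuming a data layout with 64-bit pointer indices so
// that the mask width matches the pointer index type size; %p is a placeholder.
//   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)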
6826 case Intrinsic::thread_pointer: {
6827 Check(Call.getType()->getPointerAddressSpace() ==
6828 DL.getDefaultGlobalsAddressSpace(),
6829 "llvm.thread.pointer intrinsic return type must be for the globals "
6830 "address space",
6831 &Call);
6832 break;
6833 }
6834 case Intrinsic::threadlocal_address: {
6835 const Value &Arg0 = *Call.getArgOperand(0);
6836 Check(isa<GlobalValue>(Arg0),
6837 "llvm.threadlocal.address first argument must be a GlobalValue");
6838 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6839 "llvm.threadlocal.address operand isThreadLocal() must be true");
6840 break;
6841 }
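// Illustrative IR (not from this file): the operand must be a thread-local GlobalValue.
//   @tls = thread_local global i32 0
//   %p = call ptr @llvm.threadlocal.address.p0(ptr @tls)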
6842 case Intrinsic::lifetime_start:
6843 case Intrinsic::lifetime_end: {
6844 Value *Ptr = Call.getArgOperand(0);
6845 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr),
6846 "llvm.lifetime.start/end can only be used on alloca or poison",
6847 &Call);
6848 break;
6849 }
6850 };
6851
6852 // Verify that there aren't any unmediated control transfers between funclets.
6853 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6854 Function *F = Call.getParent()->getParent();
6855 if (F->hasPersonalityFn() &&
6856 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6857 // Run EH funclet coloring on-demand and cache results for other intrinsic
6858 // calls in this function
6859 if (BlockEHFuncletColors.empty())
6860 BlockEHFuncletColors = colorEHFunclets(*F);
6861
6862 // Check for catch-/cleanup-pad in first funclet block
6863 bool InEHFunclet = false;
6864 BasicBlock *CallBB = Call.getParent();
6865 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6866 assert(CV.size() > 0 && "Uncolored block");
6867 for (BasicBlock *ColorFirstBB : CV)
6868 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6869 It != ColorFirstBB->end())
6870 if (isa_and_nonnull<FuncletPadInst>(&*It))
6871 InEHFunclet = true;
6872
6873 // Check for funclet operand bundle
6874 bool HasToken = false;
6875 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6876 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6877 HasToken = true;
6878
6879 // This would cause silent code truncation in WinEHPrepare
6880 if (InEHFunclet)
6881 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6882 }
6883 }
6884}
6885
6886/// Carefully grab the subprogram from a local scope.
6887///
6888/// This carefully grabs the subprogram from a local scope, avoiding the
6889/// built-in assertions that would typically fire.
6890 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6891 if (!LocalScope)
6892 return nullptr;
6893
6894 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6895 return SP;
6896
6897 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6898 return getSubprogram(LB->getRawScope());
6899
6900 // Just return null; broken scope chains are checked elsewhere.
6901 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6902 return nullptr;
6903}
6904
6905void Verifier::visit(DbgLabelRecord &DLR) {
6906 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6907 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6908
6909 // Ignore broken !dbg attachments; they're checked elsewhere.
6910 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6911 if (!isa<DILocation>(N))
6912 return;
6913
6914 BasicBlock *BB = DLR.getParent();
6915 Function *F = BB ? BB->getParent() : nullptr;
6916
6917 // The scopes for variables and !dbg attachments must agree.
6918 DILabel *Label = DLR.getLabel();
6919 DILocation *Loc = DLR.getDebugLoc();
6920 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6921
6922 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6923 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6924 if (!LabelSP || !LocSP)
6925 return;
6926
6927 CheckDI(LabelSP == LocSP,
6928 "mismatched subprogram between #dbg_label label and !dbg attachment",
6929 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6930 Loc->getScope()->getSubprogram());
6931}
6932
6933void Verifier::visit(DbgVariableRecord &DVR) {
6934 BasicBlock *BB = DVR.getParent();
6935 Function *F = BB->getParent();
6936
6940 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
6941
6942 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6943 // DIArgList, or an empty MDNode (which is a legacy representation for an
6944 // "undef" location).
6945 auto *MD = DVR.getRawLocation();
6946 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6947 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6948 "invalid #dbg record address/value", &DVR, MD, BB, F);
6949 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
6950 visitValueAsMetadata(*VAM, F);
6951 if (DVR.isDbgDeclare()) {
6952 // Allow integers here to support inttoptr salvage.
6953 Type *Ty = VAM->getValue()->getType();
6954 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
6955 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
6956 F);
6957 }
6958 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
6959 visitDIArgList(*AL, F);
6960 }
6961
6962 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6963 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
6964 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6965
6966 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6967 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
6968 F);
6969 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6970
6971 if (DVR.isDbgAssign()) {
6972 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6973 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
6974 F);
6975 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6976 AreDebugLocsAllowed::No);
6977
6978 const auto *RawAddr = DVR.getRawAddress();
6979 // Similarly to the location above, the address for an assign
6980 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6981 // represents an undef address.
6982 CheckDI(
6983 isa<ValueAsMetadata>(RawAddr) ||
6984 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6985 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
6986 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6987 visitValueAsMetadata(*VAM, F);
6988
6989 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6990 "invalid #dbg_assign address expression", &DVR,
6991 DVR.getRawAddressExpression(), BB, F);
6992 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6993
6994 // All of the linked instructions should be in the same function as DVR.
6995 for (Instruction *I : at::getAssignmentInsts(&DVR))
6996 CheckDI(DVR.getFunction() == I->getFunction(),
6997 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
6998 }
6999
7000 // This check is redundant with one in visitLocalVariable().
7001 DILocalVariable *Var = DVR.getVariable();
7002 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7003 BB, F);
7004
7005 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7006 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7007 &DVR, DLNode, BB, F);
7008 DILocation *Loc = DVR.getDebugLoc();
7009
7010 // The scopes for variables and !dbg attachments must agree.
7011 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7012 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7013 if (!VarSP || !LocSP)
7014 return; // Broken scope chains are checked elsewhere.
7015
7016 CheckDI(VarSP == LocSP,
7017 "mismatched subprogram between #dbg record variable and DILocation",
7018 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7019 Loc->getScope()->getSubprogram(), BB, F);
7020
7021 verifyFnArgs(DVR);
7022}
7023
7024void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7025 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7026 auto *RetTy = cast<VectorType>(VPCast->getType());
7027 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7028 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7029 "VP cast intrinsic first argument and result vector lengths must be "
7030 "equal",
7031 *VPCast);
7032
7033 switch (VPCast->getIntrinsicID()) {
7034 default:
7035 llvm_unreachable("Unknown VP cast intrinsic");
7036 case Intrinsic::vp_trunc:
7037 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7038 "llvm.vp.trunc intrinsic first argument and result element type "
7039 "must be integer",
7040 *VPCast);
7041 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7042 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7043 "larger than the bit size of the return type",
7044 *VPCast);
7045 break;
7046 case Intrinsic::vp_zext:
7047 case Intrinsic::vp_sext:
7048 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7049 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7050 "element type must be integer",
7051 *VPCast);
7052 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7053 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7054 "argument must be smaller than the bit size of the return type",
7055 *VPCast);
7056 break;
7057 case Intrinsic::vp_fptoui:
7058 case Intrinsic::vp_fptosi:
7059 case Intrinsic::vp_lrint:
7060 case Intrinsic::vp_llrint:
7061 Check(
7062 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7063 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7064 "type must be floating-point and result element type must be integer",
7065 *VPCast);
7066 break;
7067 case Intrinsic::vp_uitofp:
7068 case Intrinsic::vp_sitofp:
7069 Check(
7070 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7071 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7072 "type must be integer and result element type must be floating-point",
7073 *VPCast);
7074 break;
7075 case Intrinsic::vp_fptrunc:
7076 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7077 "llvm.vp.fptrunc intrinsic first argument and result element type "
7078 "must be floating-point",
7079 *VPCast);
7080 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7081 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7082 "larger than the bit size of the return type",
7083 *VPCast);
7084 break;
7085 case Intrinsic::vp_fpext:
7086 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7087 "llvm.vp.fpext intrinsic first argument and result element type "
7088 "must be floating-point",
7089 *VPCast);
7090 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7091 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7092 "smaller than the bit size of the return type",
7093 *VPCast);
7094 break;
7095 case Intrinsic::vp_ptrtoint:
7096 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7097 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7098 "pointer and result element type must be integer",
7099 *VPCast);
7100 break;
7101 case Intrinsic::vp_inttoptr:
7102 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7103 "llvm.vp.inttoptr intrinsic first argument element type must be "
7104 "integer and result element type must be pointer",
7105 *VPCast);
7106 break;
7107 }
7108 }
7109
7110 switch (VPI.getIntrinsicID()) {
7111 case Intrinsic::vp_fcmp: {
7112 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7114 "invalid predicate for VP FP comparison intrinsic", &VPI);
7115 break;
7116 }
7117 case Intrinsic::vp_icmp: {
7118 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7120 "invalid predicate for VP integer comparison intrinsic", &VPI);
7121 break;
7122 }
7123 case Intrinsic::vp_is_fpclass: {
7124 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7125 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7126 "unsupported bits for llvm.vp.is.fpclass test mask");
7127 break;
7128 }
7129 case Intrinsic::experimental_vp_splice: {
7130 VectorType *VecTy = cast<VectorType>(VPI.getType());
7131 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7132 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7133 if (VPI.getParent() && VPI.getParent()->getParent()) {
7134 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7135 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7136 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7137 }
7138 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7139 (Idx >= 0 && Idx < KnownMinNumElements),
7140 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7141 "known minimum number of elements in the vector. For scalable "
7142 "vectors the minimum number of elements is determined from "
7143 "vscale_range.",
7144 &VPI);
7145 break;
7146 }
7147 }
7148}
7149
7150void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7151 unsigned NumOperands = FPI.getNonMetadataArgCount();
7152 bool HasRoundingMD =
7154
7155 // Add the expected number of metadata operands.
7156 NumOperands += (1 + HasRoundingMD);
7157
7158 // Compare intrinsics carry an extra predicate metadata operand.
7159 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
7160 NumOperands += 1;
7161 Check((FPI.arg_size() == NumOperands),
7162 "invalid arguments for constrained FP intrinsic", &FPI);
7163
7164 switch (FPI.getIntrinsicID()) {
7165 case Intrinsic::experimental_constrained_lrint:
7166 case Intrinsic::experimental_constrained_llrint: {
7167 Type *ValTy = FPI.getArgOperand(0)->getType();
7168 Type *ResultTy = FPI.getType();
7169 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7170 "Intrinsic does not support vectors", &FPI);
7171 break;
7172 }
7173
7174 case Intrinsic::experimental_constrained_lround:
7175 case Intrinsic::experimental_constrained_llround: {
7176 Type *ValTy = FPI.getArgOperand(0)->getType();
7177 Type *ResultTy = FPI.getType();
7178 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7179 "Intrinsic does not support vectors", &FPI);
7180 break;
7181 }
7182
7183 case Intrinsic::experimental_constrained_fcmp:
7184 case Intrinsic::experimental_constrained_fcmps: {
7185 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7187 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7188 break;
7189 }
7190
7191 case Intrinsic::experimental_constrained_fptosi:
7192 case Intrinsic::experimental_constrained_fptoui: {
7193 Value *Operand = FPI.getArgOperand(0);
7194 ElementCount SrcEC;
7195 Check(Operand->getType()->isFPOrFPVectorTy(),
7196 "Intrinsic first argument must be floating point", &FPI);
7197 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7198 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7199 }
7200
7201 Operand = &FPI;
7202 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7203 "Intrinsic first argument and result disagree on vector use", &FPI);
7204 Check(Operand->getType()->isIntOrIntVectorTy(),
7205 "Intrinsic result must be an integer", &FPI);
7206 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7207 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7208 "Intrinsic first argument and result vector lengths must be equal",
7209 &FPI);
7210 }
7211 break;
7212 }
7213
7214 case Intrinsic::experimental_constrained_sitofp:
7215 case Intrinsic::experimental_constrained_uitofp: {
7216 Value *Operand = FPI.getArgOperand(0);
7217 ElementCount SrcEC;
7218 Check(Operand->getType()->isIntOrIntVectorTy(),
7219 "Intrinsic first argument must be integer", &FPI);
7220 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7221 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7222 }
7223
7224 Operand = &FPI;
7225 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7226 "Intrinsic first argument and result disagree on vector use", &FPI);
7227 Check(Operand->getType()->isFPOrFPVectorTy(),
7228 "Intrinsic result must be a floating point", &FPI);
7229 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7230 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7231 "Intrinsic first argument and result vector lengths must be equal",
7232 &FPI);
7233 }
7234 break;
7235 }
7236
7237 case Intrinsic::experimental_constrained_fptrunc:
7238 case Intrinsic::experimental_constrained_fpext: {
7239 Value *Operand = FPI.getArgOperand(0);
7240 Type *OperandTy = Operand->getType();
7241 Value *Result = &FPI;
7242 Type *ResultTy = Result->getType();
7243 Check(OperandTy->isFPOrFPVectorTy(),
7244 "Intrinsic first argument must be FP or FP vector", &FPI);
7245 Check(ResultTy->isFPOrFPVectorTy(),
7246 "Intrinsic result must be FP or FP vector", &FPI);
7247 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7248 "Intrinsic first argument and result disagree on vector use", &FPI);
7249 if (OperandTy->isVectorTy()) {
7250 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7251 cast<VectorType>(ResultTy)->getElementCount(),
7252 "Intrinsic first argument and result vector lengths must be equal",
7253 &FPI);
7254 }
7255 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7256 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7257 "Intrinsic first argument's type must be larger than result type",
7258 &FPI);
7259 } else {
7260 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7261 "Intrinsic first argument's type must be smaller than result type",
7262 &FPI);
7263 }
7264 break;
7265 }
7266
7267 default:
7268 break;
7269 }
7270
7271 // If a non-metadata argument is passed in a metadata slot then the
7272 // error will be caught earlier when the incorrect argument doesn't
7273 // match the specification in the intrinsic call table. Thus, no
7274 // argument type check is needed here.
7275
7276 Check(FPI.getExceptionBehavior().has_value(),
7277 "invalid exception behavior argument", &FPI);
7278 if (HasRoundingMD) {
7279 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7280 &FPI);
7281 }
7282}
7283
7284void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7285 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7286 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7287
7288 // We don't know whether this intrinsic verified correctly.
7289 if (!V || !E || !E->isValid())
7290 return;
7291
7292 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7293 auto Fragment = E->getFragmentInfo();
7294 if (!Fragment)
7295 return;
7296
7297 // The frontend helps out GDB by emitting the members of local anonymous
7298 // unions as artificial local variables with shared storage. When SROA splits
7299 // the storage for artificial local variables that are smaller than the entire
7300 // union, the overhang piece will be outside of the allotted space for the
7301 // variable and this check fails.
7302 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7303 if (V->isArtificial())
7304 return;
7305
7306 verifyFragmentExpression(*V, *Fragment, &DVR);
7307}
7308
7309template <typename ValueOrMetadata>
7310void Verifier::verifyFragmentExpression(const DIVariable &V,
7311 DIExpression::FragmentInfo Fragment,
7312 ValueOrMetadata *Desc) {
7313 // If there's no size, the type is broken, but that should be checked
7314 // elsewhere.
7315 auto VarSize = V.getSizeInBits();
7316 if (!VarSize)
7317 return;
7318
7319 unsigned FragSize = Fragment.SizeInBits;
7320 unsigned FragOffset = Fragment.OffsetInBits;
7321 CheckDI(FragSize + FragOffset <= *VarSize,
7322 "fragment is larger than or outside of variable", Desc, &V);
7323 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7324}
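// For example (illustrative): for a 32-bit variable, !DIExpression(DW_OP_LLVM_fragment, 0, 16)
// describes bits [0, 16) and is accepted, whereas a 32-bit fragment at offset 0 would be
// rejected above because it covers the entire variable.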
7325
7326void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7327 // This function does not take the scope of noninlined function arguments into
7328 // account. Don't run it if the current function is nodebug, because it may
7329 // contain inlined debug intrinsics.
7330 if (!HasDebugInfo)
7331 return;
7332
7333 // For performance reasons only check non-inlined ones.
7334 if (DVR.getDebugLoc()->getInlinedAt())
7335 return;
7336
7337 DILocalVariable *Var = DVR.getVariable();
7338 CheckDI(Var, "#dbg record without variable");
7339
7340 unsigned ArgNo = Var->getArg();
7341 if (!ArgNo)
7342 return;
7343
7344 // Verify there are no duplicate function argument debug info entries.
7345 // These will cause hard-to-debug assertions in the DWARF backend.
7346 if (DebugFnArgs.size() < ArgNo)
7347 DebugFnArgs.resize(ArgNo, nullptr);
7348
7349 auto *Prev = DebugFnArgs[ArgNo - 1];
7350 DebugFnArgs[ArgNo - 1] = Var;
7351 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7352 Prev, Var);
7353}
7354
7355void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7356 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7357
7358 // We don't know whether this intrinsic verified correctly.
7359 if (!E || !E->isValid())
7360 return;
7361
7362 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7363 Value *VarValue = DVR.getVariableLocationOp(0);
7364 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7365 return;
7366 // We allow EntryValues for swift async arguments, as they have an
7367 // ABI-guarantee to be turned into a specific register.
7368 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7369 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7370 return;
7371 }
7372
7373 CheckDI(!E->isEntryValue(),
7374 "Entry values are only allowed in MIR unless they target a "
7375 "swiftasync Argument",
7376 &DVR);
7377}
7378
7379void Verifier::verifyCompileUnits() {
7380 // When more than one Module is imported into the same context, such as during
7381 // an LTO build before linking the modules, ODR type uniquing may cause types
7382 // to point to a different CU. This check does not make sense in this case.
7383 if (M.getContext().isODRUniquingDebugTypes())
7384 return;
7385 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7386 SmallPtrSet<const Metadata *, 2> Listed;
7387 if (CUs)
7388 Listed.insert_range(CUs->operands());
7389 for (const auto *CU : CUVisited)
7390 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7391 CUVisited.clear();
7392}
7393
7394void Verifier::verifyDeoptimizeCallingConvs() {
7395 if (DeoptimizeDeclarations.empty())
7396 return;
7397
7398 const Function *First = DeoptimizeDeclarations[0];
7399 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7400 Check(First->getCallingConv() == F->getCallingConv(),
7401 "All llvm.experimental.deoptimize declarations must have the same "
7402 "calling convention",
7403 First, F);
7404 }
7405}
7406
7407void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7408 const OperandBundleUse &BU) {
7409 FunctionType *FTy = Call.getFunctionType();
7410
7411 Check((FTy->getReturnType()->isPointerTy() ||
7412 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7413 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7414 "function returning a pointer or a non-returning function that has a "
7415 "void return type",
7416 Call);
7417
7418 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7419 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7420 "an argument",
7421 Call);
7422
7423 auto *Fn = cast<Function>(BU.Inputs.front());
7424 Intrinsic::ID IID = Fn->getIntrinsicID();
7425
7426 if (IID) {
7427 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7428 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7429 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7430 "invalid function argument", Call);
7431 } else {
7432 StringRef FnName = Fn->getName();
7433 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7434 FnName == "objc_claimAutoreleasedReturnValue" ||
7435 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7436 "invalid function argument", Call);
7437 }
7438}
7439
7440void Verifier::verifyNoAliasScopeDecl() {
7441 if (NoAliasScopeDecls.empty())
7442 return;
7443
7444 // Only a single scope must be declared at a time.
7445 for (auto *II : NoAliasScopeDecls) {
7446 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7447 "Not a llvm.experimental.noalias.scope.decl ?");
7448 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7449 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7450 Check(ScopeListMV != nullptr,
7451 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7452 "argument",
7453 II);
7454
7455 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7456 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7457 Check(ScopeListMD->getNumOperands() == 1,
7458 "!id.scope.list must point to a list with a single scope", II);
7459 visitAliasScopeListMetadata(ScopeListMD);
7460 }
7461
7462 // Only check the domination rule when requested. Once all passes have been
7463 // adapted this option can go away.
7464 if (!VerifyNoAliasScopeDomination)
7465 return;
7466
7467 // Now sort the intrinsics based on the scope MDNode so that declarations of
7468 // the same scopes are next to each other.
7469 auto GetScope = [](IntrinsicInst *II) {
7470 const auto *ScopeListMV = cast<MetadataAsValue>(
7471 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7472 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7473 };
7474
7475 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7476 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7477 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7478 return GetScope(Lhs) < GetScope(Rhs);
7479 };
7480
7481 llvm::sort(NoAliasScopeDecls, Compare);
7482
7483 // Go over the intrinsics and check that for the same scope, they are not
7484 // dominating each other.
7485 auto ItCurrent = NoAliasScopeDecls.begin();
7486 while (ItCurrent != NoAliasScopeDecls.end()) {
7487 auto CurScope = GetScope(*ItCurrent);
7488 auto ItNext = ItCurrent;
7489 do {
7490 ++ItNext;
7491 } while (ItNext != NoAliasScopeDecls.end() &&
7492 GetScope(*ItNext) == CurScope);
7493
7494 // [ItCurrent, ItNext) represents the declarations for the same scope.
7495 // Ensure they are not dominating each other, but only if it is not too
7496 // expensive.
7497 if (ItNext - ItCurrent < 32)
7498 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7499 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7500 if (I != J)
7501 Check(!DT.dominates(I, J),
7502 "llvm.experimental.noalias.scope.decl dominates another one "
7503 "with the same scope",
7504 I);
7505 ItCurrent = ItNext;
7506 }
7507}
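// Editor's note (illustrative, not part of the upstream source; the metadata
// numbering is made up): a declaration that passes the checks above carries a
// scope list containing exactly one scope:
//
//   define void @f(ptr %p) {
//     call void @llvm.experimental.noalias.scope.decl(metadata !2)
//     ret void
//   }
//   declare void @llvm.experimental.noalias.scope.decl(metadata)
//
//   !0 = !{!0}        ; self-referential alias domain
//   !1 = !{!1, !0}    ; a single alias scope in that domain
//   !2 = !{!1}        ; one-element scope list passed to the intrinsic
//
// The pairwise domination check at the end only runs when the hidden
// -verify-noalias-scope-decl-dom option is enabled.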
7508
7509//===----------------------------------------------------------------------===//
7510// Implement the public interfaces to this file...
7511//===----------------------------------------------------------------------===//
7512
7513bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7514 Function &F = const_cast<Function &>(f);
7515
7516 // Don't use a raw_null_ostream. Printing IR is expensive.
7517 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7518
7519 // Note that this function's return value is inverted from what you would
7520 // expect of a function called "verify".
7521 return !V.verify(F);
7522}
7523
7524bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7525 bool *BrokenDebugInfo) {
7526 // Don't use a raw_null_ostream. Printing IR is expensive.
7527 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7528
7529 bool Broken = false;
7530 for (const Function &F : M)
7531 Broken |= !V.verify(F);
7532
7533 Broken |= !V.verify();
7534 if (BrokenDebugInfo)
7535 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7536 // Note that this function's return value is inverted from what you would
7537 // expect of a function called "verify".
7538 return Broken;
7539}
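// Editor's sketch (not part of the upstream source; checkModule is a made-up
// helper): typical client-side use of the two entry points above. Both return
// true when the IR is broken, so a well-formed module yields false.
//
//   #include "llvm/AsmParser/Parser.h"
//   #include "llvm/IR/LLVMContext.h"
//   #include "llvm/IR/Module.h"
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/SourceMgr.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   bool checkModule(llvm::StringRef Asm) {
//     llvm::LLVMContext Ctx;
//     llvm::SMDiagnostic Err;
//     std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(Asm, Err, Ctx);
//     if (!M)
//       return false;                       // parse error, not a verifier error
//     bool BrokenDebugInfo = false;
//     bool Broken = llvm::verifyModule(*M, &llvm::errs(), &BrokenDebugInfo);
//     return !Broken && !BrokenDebugInfo;   // true only for a well-formed module
//   }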
7540
7541namespace {
7542
7543struct VerifierLegacyPass : public FunctionPass {
7544 static char ID;
7545
7546 std::unique_ptr<Verifier> V;
7547 bool FatalErrors = true;
7548
7549 VerifierLegacyPass() : FunctionPass(ID) {
7550 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7551 }
7552 explicit VerifierLegacyPass(bool FatalErrors)
7553 : FunctionPass(ID),
7554 FatalErrors(FatalErrors) {
7555 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7556 }
7557
7558 bool doInitialization(Module &M) override {
7559 V = std::make_unique<Verifier>(
7560 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7561 return false;
7562 }
7563
7564 bool runOnFunction(Function &F) override {
7565 if (!V->verify(F) && FatalErrors) {
7566 errs() << "in function " << F.getName() << '\n';
7567 report_fatal_error("Broken function found, compilation aborted!");
7568 }
7569 return false;
7570 }
7571
7572 bool doFinalization(Module &M) override {
7573 bool HasErrors = false;
7574 for (Function &F : M)
7575 if (F.isDeclaration())
7576 HasErrors |= !V->verify(F);
7577
7578 HasErrors |= !V->verify();
7579 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7580 report_fatal_error("Broken module found, compilation aborted!");
7581 return false;
7582 }
7583
7584 void getAnalysisUsage(AnalysisUsage &AU) const override {
7585 AU.setPreservesAll();
7586 }
7587};
7588
7589} // end anonymous namespace
7590
7591/// Helper to issue failure from the TBAA verification
7592template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7593 if (Diagnostic)
7594 return Diagnostic->CheckFailed(Args...);
7595}
7596
7597#define CheckTBAA(C, ...) \
7598 do { \
7599 if (!(C)) { \
7600 CheckFailed(__VA_ARGS__); \
7601 return false; \
7602 } \
7603 } while (false)
7604
7605/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7606/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7607/// struct-type node describing an aggregate data structure (like a struct).
7608TBAAVerifier::TBAABaseNodeSummary
7609TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7610 bool IsNewFormat) {
7611 if (BaseNode->getNumOperands() < 2) {
7612 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7613 return {true, ~0u};
7614 }
7615
7616 auto Itr = TBAABaseNodes.find(BaseNode);
7617 if (Itr != TBAABaseNodes.end())
7618 return Itr->second;
7619
7620 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7621 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7622 (void)InsertResult;
7623 assert(InsertResult.second && "We just checked!");
7624 return Result;
7625}
7626
7627TBAAVerifier::TBAABaseNodeSummary
7628TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7629 bool IsNewFormat) {
7630 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7631
7632 if (BaseNode->getNumOperands() == 2) {
7633 // Scalar nodes can only be accessed at offset 0.
7634 return isValidScalarTBAANode(BaseNode)
7635 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7636 : InvalidNode;
7637 }
7638
7639 if (IsNewFormat) {
7640 if (BaseNode->getNumOperands() % 3 != 0) {
7641 CheckFailed("Access tag nodes must have the number of operands that is a "
7642 "multiple of 3!", BaseNode);
7643 return InvalidNode;
7644 }
7645 } else {
7646 if (BaseNode->getNumOperands() % 2 != 1) {
7647 CheckFailed("Struct tag nodes must have an odd number of operands!",
7648 BaseNode);
7649 return InvalidNode;
7650 }
7651 }
7652
7653 // Check the type size field.
7654 if (IsNewFormat) {
7655 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7656 BaseNode->getOperand(1));
7657 if (!TypeSizeNode) {
7658 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7659 return InvalidNode;
7660 }
7661 }
7662
7663 // Check the type name field. In the new format it can be anything.
7664 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7665 CheckFailed("Struct tag nodes have a string as their first operand",
7666 BaseNode);
7667 return InvalidNode;
7668 }
7669
7670 bool Failed = false;
7671
7672 std::optional<APInt> PrevOffset;
7673 unsigned BitWidth = ~0u;
7674
7675 // We've already checked that BaseNode is not a degenerate root node with one
7676 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7677 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7678 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7679 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7680 Idx += NumOpsPerField) {
7681 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7682 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7683 if (!isa<MDNode>(FieldTy)) {
7684 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7685 Failed = true;
7686 continue;
7687 }
7688
7689 auto *OffsetEntryCI =
7690 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7691 if (!OffsetEntryCI) {
7692 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7693 Failed = true;
7694 continue;
7695 }
7696
7697 if (BitWidth == ~0u)
7698 BitWidth = OffsetEntryCI->getBitWidth();
7699
7700 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7701 CheckFailed(
7702 "Bitwidth between the offsets and struct type entries must match", &I,
7703 BaseNode);
7704 Failed = true;
7705 continue;
7706 }
7707
7708 // NB! As far as I can tell, we generate a non-strictly increasing offset
7709 // sequence only from structs that have zero size bit fields. When
7710 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7711 // pick the field lexically the latest in struct type metadata node. This
7712 // mirrors the actual behavior of the alias analysis implementation.
7713 bool IsAscending =
7714 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7715
7716 if (!IsAscending) {
7717 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7718 Failed = true;
7719 }
7720
7721 PrevOffset = OffsetEntryCI->getValue();
7722
7723 if (IsNewFormat) {
7724 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7725 BaseNode->getOperand(Idx + 2));
7726 if (!MemberSizeNode) {
7727 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7728 Failed = true;
7729 continue;
7730 }
7731 }
7732 }
7733
7734 return Failed ? InvalidNode
7735 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7736}
7737
7738static bool IsRootTBAANode(const MDNode *MD) {
7739 return MD->getNumOperands() < 2;
7740}
7741
7742static bool IsScalarTBAANodeImpl(const MDNode *MD,
7743 SmallPtrSetImpl<const MDNode *> &Visited) {
7744 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7745 return false;
7746
7747 if (!isa<MDString>(MD->getOperand(0)))
7748 return false;
7749
7750 if (MD->getNumOperands() == 3) {
7751 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7752 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7753 return false;
7754 }
7755
7756 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7757 return Parent && Visited.insert(Parent).second &&
7758 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7759}
7760
7761bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7762 auto ResultIt = TBAAScalarNodes.find(MD);
7763 if (ResultIt != TBAAScalarNodes.end())
7764 return ResultIt->second;
7765
7766 SmallPtrSet<const MDNode *, 4> Visited;
7767 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7768 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7769 (void)InsertResult;
7770 assert(InsertResult.second && "Just checked!");
7771
7772 return Result;
7773}
7774
7775/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7776/// Offset in place to be the offset within the field node returned.
7777///
7778/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7779MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7780 const MDNode *BaseNode,
7781 APInt &Offset,
7782 bool IsNewFormat) {
7783 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7784
7785 // Scalar nodes have only one possible "field" -- their parent in the access
7786 // hierarchy. Offset must be zero at this point, but our caller is supposed
7787 // to check that.
7788 if (BaseNode->getNumOperands() == 2)
7789 return cast<MDNode>(BaseNode->getOperand(1));
7790
7791 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7792 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7793 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7794 Idx += NumOpsPerField) {
7795 auto *OffsetEntryCI =
7796 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7797 if (OffsetEntryCI->getValue().ugt(Offset)) {
7798 if (Idx == FirstFieldOpNo) {
7799 CheckFailed("Could not find TBAA parent in struct type node", &I,
7800 BaseNode, &Offset);
7801 return nullptr;
7802 }
7803
7804 unsigned PrevIdx = Idx - NumOpsPerField;
7805 auto *PrevOffsetEntryCI =
7806 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7807 Offset -= PrevOffsetEntryCI->getValue();
7808 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7809 }
7810 }
7811
7812 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7813 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7814 BaseNode->getOperand(LastIdx + 1));
7815 Offset -= LastOffsetEntryCI->getValue();
7816 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7817}
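// Editor's note (illustrative, not part of the upstream source): for a base
// node with fields at offsets 0 and 4, e.g.
//
//   !3 = !{!"S", !2, i64 0, !2, i64 4}
//
// a query with Offset = 6 finds no field whose offset exceeds 6, so it falls
// through to the last field, rewrites Offset to 6 - 4 = 2, and returns that
// field's type node (!2); the caller then keeps walking inside that field.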
7818
7819static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7820 if (!Type || Type->getNumOperands() < 3)
7821 return false;
7822
7823 // In the new format type nodes shall have a reference to the parent type as
7824 // its first operand.
7825 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7826}
7827
7828bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7829 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7830 &I, MD);
7831
7832 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7833 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7834 isa<AtomicCmpXchgInst>(I),
7835 "This instruction shall not have a TBAA access tag!", &I);
7836
7837 bool IsStructPathTBAA =
7838 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7839
7840 CheckTBAA(IsStructPathTBAA,
7841 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7842 &I);
7843
7844 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7845 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7846
7847 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7848
7849 if (IsNewFormat) {
7850 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7851 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7852 } else {
7853 CheckTBAA(MD->getNumOperands() < 5,
7854 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7855 }
7856
7857 // Check the access size field.
7858 if (IsNewFormat) {
7859 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7860 MD->getOperand(3));
7861 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7862 }
7863
7864 // Check the immutability flag.
7865 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7866 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7867 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7868 MD->getOperand(ImmutabilityFlagOpNo));
7869 CheckTBAA(IsImmutableCI,
7870 "Immutability tag on struct tag metadata must be a constant", &I,
7871 MD);
7872 CheckTBAA(
7873 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7874 "Immutability part of the struct tag metadata must be either 0 or 1",
7875 &I, MD);
7876 }
7877
7878 CheckTBAA(BaseNode && AccessType,
7879 "Malformed struct tag metadata: base and access-type "
7880 "should be non-null and point to Metadata nodes",
7881 &I, MD, BaseNode, AccessType);
7882
7883 if (!IsNewFormat) {
7884 CheckTBAA(isValidScalarTBAANode(AccessType),
7885 "Access type node must be a valid scalar type", &I, MD,
7886 AccessType);
7887 }
7888
7889 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7890 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7891
7892 APInt Offset = OffsetCI->getValue();
7893 bool SeenAccessTypeInPath = false;
7894
7895 SmallPtrSet<MDNode *, 4> StructPath;
7896
7897 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7898 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7899 IsNewFormat)) {
7900 if (!StructPath.insert(BaseNode).second) {
7901 CheckFailed("Cycle detected in struct path", &I, MD);
7902 return false;
7903 }
7904
7905 bool Invalid;
7906 unsigned BaseNodeBitWidth;
7907 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7908 IsNewFormat);
7909
7910 // If the base node is invalid in itself, then we've already printed all the
7911 // errors we wanted to print.
7912 if (Invalid)
7913 return false;
7914
7915 SeenAccessTypeInPath |= BaseNode == AccessType;
7916
7917 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7918 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7919 &I, MD, &Offset);
7920
7921 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7922 (BaseNodeBitWidth == 0 && Offset == 0) ||
7923 (IsNewFormat && BaseNodeBitWidth == ~0u),
7924 "Access bit-width not the same as description bit-width", &I, MD,
7925 BaseNodeBitWidth, Offset.getBitWidth());
7926
7927 if (IsNewFormat && SeenAccessTypeInPath)
7928 break;
7929 }
7930
7931 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7932 MD);
7933 return true;
7934}
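// Editor's note (illustrative, not part of the upstream source; the metadata
// numbering is made up): an old-format struct-path access tag that the walk
// above accepts, describing a store to the second int member of
// "struct S { int a; int b; };":
//
//   store i32 0, ptr %p, !tbaa !4
//
//   !0 = !{!"Simple C/C++ TBAA"}          ; root node (fewer than two operands)
//   !1 = !{!"omnipotent char", !0, i64 0} ; scalar node: name, parent, offset
//   !2 = !{!"int", !1, i64 0}             ; scalar node for 'int'
//   !3 = !{!"S", !2, i64 0, !2, i64 4}    ; struct node: name plus (type, offset) pairs
//   !4 = !{!3, !2, i64 4}                 ; access tag: base type, access type, offset
//
// Starting from !3 with Offset = 4, getFieldNodeFromTBAABaseNode selects the
// field at offset 4 and reduces Offset to 0; the walk then reaches the access
// type !2 with a zero offset, so SeenAccessTypeInPath becomes true and the
// tag verifies.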
7935
7936char VerifierLegacyPass::ID = 0;
7937INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7938
7939FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7940 return new VerifierLegacyPass(FatalErrors);
7941}
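// Editor's sketch (not part of the upstream source; PM and M are assumed to be
// a legacy pass manager and a module owned by the caller): scheduling the
// verifier through the legacy pass manager. With FatalErrors set, a broken
// function or module aborts compilation via report_fatal_error.
//
//   #include "llvm/IR/LegacyPassManager.h"
//
//   llvm::legacy::PassManager PM;
//   PM.add(llvm::createVerifierPass(/*FatalErrors=*/true));
//   PM.run(M);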
7942
7943AnalysisKey VerifierAnalysis::Key;
7944VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7945 ModuleAnalysisManager &) {
7946 Result Res;
7947 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7948 return Res;
7949}
7950
7951VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7952 FunctionAnalysisManager &) {
7953 return { llvm::verifyFunction(F, &dbgs()), false };
7954}
7955
7956PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7957 auto Res = AM.getResult<VerifierAnalysis>(M);
7958 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7959 report_fatal_error("Broken module found, compilation aborted!");
7960
7961 return PreservedAnalyses::all();
7962}
7963
7964PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7965 auto res = AM.getResult<VerifierAnalysis>(F);
7966 if (res.IRBroken && FatalErrors)
7967 report_fatal_error("Broken function found, compilation aborted!");
7968
7969 return PreservedAnalyses::all();
7970}
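// Editor's sketch (not part of the upstream source; MPM, M and MAM are assumed
// to be a pass manager, module and analysis manager configured by the caller):
// scheduling the verifier in the new pass manager. VerifierAnalysis caches the
// verification result, and VerifierPass turns it into a hard error when
// FatalErrors is set (the default).
//
//   llvm::ModulePassManager MPM;
//   MPM.addPass(llvm::VerifierPass());
//   MPM.run(M, MAM);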
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
Definition: Attributes.cpp:763
@ FnAttr
Definition: Attributes.cpp:761
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
std::string Name
uint64_t Size
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
Definition: Verifier.cpp:7742
static bool isType(const Metadata *MD)
Definition: Verifier.cpp:1132
static Instruction * getSuccPad(Instruction *Terminator)
Definition: Verifier.cpp:2824
#define Check(C,...)
We know that cond should be true, if not print an error message.
Definition: Verifier.cpp:666
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
Definition: Verifier.cpp:7819
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition: Verifier.cpp:676
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition: Verifier.cpp:717
static bool isDINode(const Metadata *MD)
Definition: Verifier.cpp:1134
static bool isScope(const Metadata *MD)
Definition: Verifier.cpp:1133
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static DISubprogram * getSubprogram(Metadata *LocalScope)
Carefully grab the subprogram from a local scope.
Definition: Verifier.cpp:6890
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
Definition: Verifier.cpp:3976
#define CheckTBAA(C,...)
Definition: Verifier.cpp:7597
static bool isConstantIntMetadataOperand(const Metadata *MD)
Definition: Verifier.cpp:5243
static bool IsRootTBAANode(const MDNode *MD)
Definition: Verifier.cpp:7738
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
Definition: Verifier.cpp:4302
static Value * getParentPad(Value *EHPad)
Definition: Verifier.cpp:4572
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
Definition: Verifier.cpp:1335
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
Definition: Verifier.cpp:3986
bool isFiniteNonZero() const
Definition: APFloat.h:1459
bool isNegative() const
Definition: APFloat.h:1449
const fltSemantics & getSemantics() const
Definition: APFloat.h:1457
Class for arbitrary precision integers.
Definition: APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition: APInt.h:399
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:64
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:153
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:128
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:121
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:106
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition: Function.cpp:293
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:506
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:823
BinOp getOperation() const
Definition: Instructions.h:819
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:863
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
Definition: AttributeMask.h:67
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
static LLVM_ABI Attribute::AttrKind getAttrKindFromName(StringRef AttrName)
Definition: Attributes.cpp:313
static LLVM_ABI bool canUseAsRetAttr(AttrKind Kind)
Definition: Attributes.cpp:793
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:400
static LLVM_ABI bool isExistingAttribute(StringRef Name)
Return true if the provided string matches the IR name of an attribute.
Definition: Attributes.cpp:336
static LLVM_ABI bool canUseAsFnAttr(AttrKind Kind)
Definition: Attributes.cpp:785
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:88
@ None
No attributes have been set.
Definition: Attributes.h:90
static bool isIntAttrKind(AttrKind Kind)
Definition: Attributes.h:104
static LLVM_ABI bool canUseAsParamAttr(AttrKind Kind)
Definition: Attributes.cpp:789
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:223
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:528
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:337
const Instruction & front() const
Definition: BasicBlock.h:482
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:549
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:445
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Definition: Constants.cpp:1922
Conditional or Unconditional Branch instruction.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1415
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1406
Value * getCalledOperand() const
Definition: InstrTypes.h:1340
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1205
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
unsigned arg_size() const
Definition: InstrTypes.h:1290
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1424
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition: InstrTypes.h:784
bool isIntPredicate() const
Definition: InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition: InstrTypes.h:778
ConstantArray - Constant Array Declarations.
Definition: Constants.h:433
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1120
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:277
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:226
bool isNegative() const
Definition: Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:154
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1032
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition: Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition: Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
Definition: ConstantRange.h:47
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1526
This is an important base class in LLVM.
Definition: Constant.h:43
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
List of ValueAsMetadata, to be used as an argument to a dbg.value intrinsic.
Assignment ID.
Basic type, like 'int' or 'float'.
Debug common block.
Enumeration value.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI bool isValid() const
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
A pair of DIGlobalVariable and DIExpression.
DIGlobalVariable * getVariable() const
DIExpression * getExpression() const
An imported module (C++ using directive or similar).
Debug lexical block.
A scope for locals.
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Metadata * getRawScope() const
Represents a module in the programming language, for example, a Clang module, or a Fortran module.
Debug lexical block.
Tagged DWARF-like metadata node.
Base class for scope-like contexts.
String type, Fortran CHARACTER(n)
Subprogram description. Uses SubclassData1.
Array subrange.
Type array for a subprogram.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This represents the llvm.dbg.label instruction.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
MDNode * getRawAddressExpression() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition: DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:165
bool empty() const
Definition: DenseMap.h:107
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:214
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:334
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:135
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
An instruction for ordering other memory operations.
Definition: Instructions.h:429
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:454
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:592
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2391
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:244
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:903
const std::string & getGC() const
Definition: Function.cpp:831
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
Generic tagged DWARF-like metadata node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalAlias.h:98
const Constant * getAliasee() const
Definition: GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition: Globals.cpp:652
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalIFunc.h:86
const Constant * getResolver() const
Definition: GlobalIFunc.h:73
bool hasComdat() const
Definition: GlobalObject.h:130
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition: Value.h:576
bool hasExternalLinkage() const
Definition: GlobalValue.h:513
bool isDSOLocal() const
Definition: GlobalValue.h:307
bool isImplicitDSOLocal() const
Definition: GlobalValue.h:300
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:316
bool hasValidDeclarationLinkage() const
Definition: GlobalValue.h:535
LinkageTypes getLinkage() const
Definition: GlobalValue.h:548
bool hasDefaultVisibility() const
Definition: GlobalValue.h:251
bool hasPrivateLinkage() const
Definition: GlobalValue.h:529
bool hasHiddenVisibility() const
Definition: GlobalValue.h:252
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:531
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:280
bool hasDLLExportStorageClass() const
Definition: GlobalValue.h:283
bool isDeclarationForLinker() const
Definition: GlobalValue.h:625
unsigned getAddressSpace() const
Definition: GlobalValue.h:207
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:663
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:296
bool hasComdat() const
Definition: GlobalValue.h:243
bool hasCommonLinkage() const
Definition: GlobalValue.h:534
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:217
bool hasAppendingLinkage() const
Definition: GlobalValue.h:527
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:514
Type * getValueType() const
Definition: GlobalValue.h:298
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitTerminator(Instruction &I)
Definition: InstVisitor.h:248
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:262
void visitFunction(Function &F)
Definition: InstVisitor.h:142
void visitBasicBlock(BasicBlock &BB)
Definition: InstVisitor.h:143
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitFuncletPadInst(FuncletPadInst &I)
Definition: InstVisitor.h:198
void visitInstruction(Instruction &I)
Definition: InstVisitor.h:275
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:82
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:56
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:180
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:224
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:234
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:215
Metadata node.
Definition: Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
bool isTemporary() const
Definition: Metadata.h:1261
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1443
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1451
bool isDistinct() const
Definition: Metadata.h:1260
bool isResolved() const
Check if node is fully resolved.
Definition: Metadata.h:1257
LLVMContext & getContext() const
Definition: Metadata.h:1241
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:899
Metadata * get() const
Definition: Metadata.h:928
A single uniqued string.
Definition: Metadata.h:720
LLVM_ABI StringRef getString() const
Definition: Metadata.cpp:617
Typed, array-like tuple of metadata.
Definition: Metadata.h:1651
Tuple of metadata.
Definition: Metadata.h:1493
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:84
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:182
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:111
Metadata * getMetadata() const
Definition: Metadata.h:200
Root of the metadata hierarchy.
Definition: Metadata.h:63
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
Definition: AsmWriter.cpp:5421
unsigned getMetadataID() const
Definition: Metadata.h:103
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:117
@ AppendUnique
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:146
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:138
@ Warning
Emits a warning if two values disagree.
Definition: Module.h:124
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:120
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:152
@ Append
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:141
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:149
@ Require
Adds a requirement that another module flag be present and have a specified value after linking is pe...
Definition: Module.h:133
const std::string & getModuleIdentifier() const
Get the module identifier which is, essentially, the name of the module.
Definition: Module.h:252
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB)
Checks if Metadata represents a valid ModFlagBehavior, and stores the converted result in MFB.
Definition: Module.cpp:323
A tuple of MDNodes.
Definition: Metadata.h:1753
LLVM_ABI StringRef getName() const
Definition: Metadata.cpp:1482
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
Definition: AsmWriter.cpp:5082
iterator_range< op_iterator > operands()
Definition: Metadata.h:1849
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
Simple wrapper around std::function<void(raw_ostream&)>.
Definition: Printable.h:38
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2196
Resume the propagation of an exception.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:380
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
void insert_range(Range &&R)
Definition: SmallPtrSet.h:490
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void reserve(size_type N)
Definition: SmallVector.h:664
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:806
void resize(size_type N)
Definition: SmallVector.h:639
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
StringMapEntry - This is used to represent one value that is inserted into a StringMap.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:480
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:269
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:151
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:434
static constexpr size_t npos
Definition: StringRef.h:57
Class to represent struct types.
Definition: DerivedTypes.h:218
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:368
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition: Type.cpp:441
Multiway switch.
Verify that the TBAA Metadatas are valid.
Definition: Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
Definition: Verifier.cpp:7828
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:264
bool isLabelTy() const
Return true if this is 'label'.
Definition: Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:267
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:311
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:255
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:225
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:352
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition: Type.h:231
This class represents a cast unsigned integer to floating point.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
op_range operands()
Definition: User.h:292
Value * getOperand(unsigned i) const
Definition: User.h:232
unsigned getNumOperands() const
Definition: User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
This is the common base class for vector predication intrinsics.
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:457
Value * getValue() const
Definition: Metadata.h:497
LLVM Value Representation.
Definition: Value.h:75
iterator_range< user_iterator > materialized_users()
Definition: Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
static constexpr uint64_t MaximumAlignment
Definition: Value.h:830
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:705
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:812
iterator_range< user_iterator > users()
Definition: Value.h:426
bool materialized_use_empty() const
Definition: Value.h:351
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1101
bool hasName() const
Definition: Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
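For orientation only, a minimal sketch of how the Value and User accessors listed above are typically used when walking IR (the helper dumpUsers is hypothetical, not part of the verifier):

#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void dumpUsers(const Value &V) {
  // Every Value is typed and may optionally carry a name.
  errs() << "value ";
  if (V.hasName())
    errs() << V.getName();
  errs() << " of type " << *V.getType() << " has users:\n";
  for (const User *U : V.users())
    errs() << "  " << *U << "\n";
}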
Check a module for errors, and report separate error states for IR and debug info errors.
Definition: Verifier.h:108
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
Definition: Verifier.cpp:7944
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: Verifier.cpp:7956
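A minimal new-pass-manager sketch showing where VerifierPass (whose run methods are indexed above) is typically scheduled; this is illustrative, not a prescribed recipe:

#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
using namespace llvm;

void addVerification(ModulePassManager &MPM) {
  // Run the verifier over the whole module; FatalErrors=true aborts the
  // process when broken IR is found.
  MPM.addPass(VerifierPass(/*FatalErrors=*/true));
}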
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition: TypeSize.h:159
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
LLVM_ABI AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
@ Entry
Definition: COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PTX_Device
Call to a PTX device function.
Definition: CallingConv.h:129
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ PTX_Kernel
Call to a PTX kernel. Passes all arguments in parameter space.
Definition: CallingConv.h:125
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Fill the given array of IITDescriptors with the IIT table descriptors for the specified intrinsic.
Definition: Intrinsics.cpp:458
@ MatchIntrinsicTypes_NoMatchRet
Definition: Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition: Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "Constrained Floating-Point Intrinsics" that take ...
Definition: Intrinsics.cpp:794
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Intrinsics.cpp:49
static const int NoAliasScopeDeclScopeArg
Definition: Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
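The three Intrinsic helpers above are normally used together. A hedged sketch of that pattern, with error reporting omitted (the helper signatureMatches is illustrative only):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

static bool signatureMatches(const Function &F) {
  Intrinsic::ID ID = F.getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic)
    return true; // Not an intrinsic; nothing to check here.

  SmallVector<Intrinsic::IITDescriptor, 8> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;

  SmallVector<Type *, 4> ArgTys;
  FunctionType *FTy = F.getFunctionType();
  // matchIntrinsicSignature consumes TableRef; matchIntrinsicVarArg then
  // checks the leftover descriptors against the vararg-ness of the type.
  return Intrinsic::matchIntrinsicSignature(FTy, TableRef, ArgTys) ==
             Intrinsic::MatchIntrinsicTypes_Match &&
         !Intrinsic::matchIntrinsicVarArg(FTy->isVarArg(), TableRef);
}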
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition: DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:149
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled name in the following format:
@ CE
Windows NT (Windows on ARM)
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
Definition: DebugInfo.cpp:1887
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
@ DW_MACINFO_undef
Definition: Dwarf.h:804
@ DW_MACINFO_start_file
Definition: Dwarf.h:805
@ DW_MACINFO_define
Definition: Dwarf.h:803
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
@ Write
Definition: CodeGenData.h:109
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition: STLExtras.h:870
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2491
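Purely for illustration, the range helpers above (all_of, enumerate, and drop_begin) compose as in this sketch over a hypothetical container of values:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

bool allTailPositive(const SmallVectorImpl<int> &Vals) {
  // Skip the first element, then check the rest; enumerate keeps an index
  // available alongside each value.
  return all_of(enumerate(drop_begin(Vals)), [](const auto &IdxAndVal) {
    return IdxAndVal.value() > 0;
  });
}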
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7513
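A hedged usage sketch for verifyFunction as declared above, as one might call it while debugging a transform (the surrounding helper is hypothetical):

#include "llvm/IR/Function.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void checkAfterTransform(Function &F) {
  // verifyFunction returns true if errors were found; messages go to errs().
  if (verifyFunction(F, &errs()))
    errs() << "verification failed for " << F.getName() << "\n";
}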
AllocFnKind
Definition: Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2155
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:288
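For context, a small illustrative use of isPowerOf2_64 of the kind that alignment-style checks rely on (not a quote from the verifier):

#include "llvm/Support/MathExtras.h"
#include <cstdint>
using namespace llvm;

bool isValidAlignmentValue(uint64_t A) {
  // isPowerOf2_64 already rejects zero, so this covers "non-zero power of two".
  return isPowerOf2_64(A);
}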
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1669
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
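A sketch of how classifyEHPersonality and isScopedEHPersonality (listed above) are commonly combined, assuming the function carries a personality attribute; the helper usesScopedEH is illustrative:

#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
using namespace llvm;

bool usesScopedEH(const Function &F) {
  if (!F.hasPersonalityFn())
    return false;
  EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
  // Scoped personalities use catchswitch/catchpad/cleanuppad-style IR.
  return isScopedEHPersonality(Pers);
}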
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
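As an illustrative sketch (the !prof layout handling is simplified and assumes any non-value-profile node carries branch weights), the profiling helpers above can be used to inspect a !prof attachment:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfDataUtils.h"
using namespace llvm;

unsigned countBranchWeights(const Instruction &I) {
  MDNode *Prof = I.getMetadata(LLVMContext::MD_prof);
  if (!Prof || isValueProfileMD(Prof))
    return 0; // No branch_weights payload to count.
  return getNumBranchWeights(*Prof);
}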
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ ArgMem
Access to memory via argument pointers.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
Definition: Verifier.cpp:7939
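For the legacy pass manager, createVerifierPass (above) is used roughly as in this sketch; whether FatalErrors should be true depends on the embedding tool:

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
using namespace llvm;

void runLegacyVerifier(Module &M) {
  legacy::PassManager PM;
  PM.add(createVerifierPass(/*FatalErrors=*/false));
  PM.run(M);
}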
@ Dynamic
Denotes mode unknown at compile time.
@ MaskAll
A bitmask that includes all valid flags.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
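A small illustrative call to convertStrToRoundingMode, of the sort used when checking the metadata string of a constrained FP intrinsic (the helper and its argument are example values, not verifier code):

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/FPEnv.h"
#include <optional>
using namespace llvm;

bool isKnownRoundingString(StringRef S) {
  // e.g. "round.tonearest" parses; an arbitrary string yields std::nullopt.
  std::optional<RoundingMode> RM = convertStrToRoundingMode(S);
  return RM.has_value();
}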
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
Definition: GCStrategy.cpp:24
auto predecessors(const MachineBasicBlock *BB)
LLVM_ABI bool isExplicitlyUnknownBranchWeightsMetadata(const MDNode &MD)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:119
constexpr bool isCallableCC(CallingConv::ID CC)
Definition: CallingConv.h:298
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7524
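And a hedged sketch of verifyModule usage, mirroring its declaration above; the surrounding driver code is hypothetical:

#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

bool moduleIsSane(const Module &M) {
  bool BrokenDebugInfo = false;
  // Returns true if the module is broken; debug-info-only breakage is
  // reported separately through BrokenDebugInfo.
  if (verifyModule(M, &errs(), &BrokenDebugInfo))
    return false;
  if (BrokenDebugInfo)
    errs() << "note: debug info is malformed and could be stripped\n";
  return true;
}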
#define N
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:266
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:29
Description of the encoding of one expression Op.
static LLVM_ABI const char * SyntheticFunctionEntryCount
Definition: ProfDataUtils.h:28
static LLVM_ABI const char * BranchWeights
Definition: ProfDataUtils.h:25
static LLVM_ABI const char * FunctionEntryCount
Definition: ProfDataUtils.h:27
static LLVM_ABI const char * UnknownBranchWeightsMarker
Definition: ProfDataUtils.h:30
static LLVM_ABI const char * ValueProfile
Definition: ProfDataUtils.h:26
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1011
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1039
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1012
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition: Verifier.cpp:303
VerifierSupport(raw_ostream *OS, const Module &M)
Definition: Verifier.cpp:155
bool Broken
Track the brokenness of the module while recursively visiting.
Definition: Verifier.cpp:149
raw_ostream * OS
Definition: Verifier.cpp:141
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition: Verifier.cpp:296
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition: Verifier.cpp:151
LLVMContext & Context
Definition: Verifier.cpp:146
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition: Verifier.cpp:153
void CheckFailed(const Twine &Message)
A check failed, so print out the condition and the message.
Definition: Verifier.cpp:285
const Module & M
Definition: Verifier.cpp:142
const DataLayout & DL
Definition: Verifier.cpp:145
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition: Verifier.cpp:312
const Triple & TT
Definition: Verifier.cpp:144
ModuleSlotTracker MST
Definition: Verifier.cpp:143