LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
// NOTE(review): this listing is damaged — the extraction dropped the function
// signature (original line 187) and every `case` label (lines 189, 192, 195,
// 198, 201). Judging by the printed strings, each break below corresponds to
// one DbgVariableRecord location kind; confirm against the original file.
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "assign";
197 break;
199 *OS << "end";
200 break;
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
// Unwrap a typed MD-tuple array and print the underlying tuple node.
214 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
215 Write(MD.get());
216 }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
/// Print the head of the argument pack, then recurse over the tail.
template <typename THead, typename... TTail>
void WriteTs(const THead &Head, const TTail &...Tail) {
  Write(Head);
  WriteTs(Tail...);
}

/// Recursion terminator: nothing left to print.
template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so printout out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
// NOTE(review): original line 307 is missing from this listing. Upstream it
// also propagates the failure into the module-broken flag when broken debug
// info is treated as an error (`Broken |= TreatBrokenDebugInfoAsError;`) —
// confirm against the original file before relying on this function.
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
337 SmallPtrSet<const Metadata *, 32> MDNodes;
338
339 /// Keep track which DISubprogram is attached to which function.
340 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
341
342 /// Track all DICompileUnits visited.
343 SmallPtrSet<const Metadata *, 2> CUVisited;
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
357 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
361 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
// Construct a verifier bound to module M. Diagnostics go to OS (null
// suppresses printing). ShouldTreatBrokenDebugInfoAsError selects whether
// broken debug info fails verification outright or is merely recorded.
393 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
394 const Module &M)
395 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
396 SawFrameEscape(false), TBAAVerifyHelper(this) {
397 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
398 }
399
// True if any debug-info check failed during verification so far.
400 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
// Verify a single function. Returns false on any failure. Bails out early
// (without running the visitor) if any block lacks a terminator, since
// dominance information cannot be computed for such a function.
402 bool verify(const Function &F) {
403 llvm::TimeTraceScope timeScope("Verifier");
404 assert(F.getParent() == &M &&
405 "An instance of this class only works with a specific module!");
406
407 // First ensure the function is well-enough formed to compute dominance
408 // information, and directly compute a dominance tree. We don't rely on the
409 // pass manager to provide this as it isolates us from a potentially
410 // out-of-date dominator tree and makes it significantly more complex to run
411 // this code outside of a pass manager.
412 // FIXME: It's really gross that we have to cast away constness here.
413 if (!F.empty())
414 DT.recalculate(const_cast<Function &>(F));
415
// Every reachable basic block must end in a terminator before we can visit.
416 for (const BasicBlock &BB : F) {
417 if (!BB.empty() && BB.back().isTerminator())
418 continue;
419
420 if (OS) {
421 *OS << "Basic Block in function '" << F.getName()
422 << "' does not have terminator!\n";
423 BB.printAsOperand(*OS, true, MST);
424 *OS << "\n";
425 }
426 return false;
427 }
428
429 auto FailureCB = [this](const Twine &Message) {
430 this->CheckFailed(Message);
431 };
432 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
433
434 Broken = false;
435 // FIXME: We strip const here because the inst visitor strips const.
436 visit(const_cast<Function &>(F));
437 verifySiblingFuncletUnwinds();
438
439 if (ConvergenceVerifyHelper.sawTokens())
440 ConvergenceVerifyHelper.verify(DT);
441
// Reset per-function state so the next verify(F) call starts clean.
442 InstsInThisBlock.clear();
443 DebugFnArgs.clear();
444 LandingPadResultTy = nullptr;
445 SawFrameEscape = false;
446 SiblingFuncletInfo.clear();
447 verifyNoAliasScopeDecl();
448 NoAliasScopeDecls.clear();
449
450 return !Broken;
451 }
452
453 /// Verify the module that this instance of \c Verifier was initialized with.
// Module-level verification: visits globals, aliases, ifuncs, named
// metadata, comdats and module flags, then runs the cross-cutting checks
// that need the whole module. Returns false if anything is broken.
454 bool verify() {
455 Broken = false;
456
457 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
458 for (const Function &F : M)
459 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
460 DeoptimizeDeclarations.push_back(&F);
461
462 // Now that we've visited every function, verify that we never asked to
463 // recover a frame index that wasn't escaped.
464 verifyFrameRecoverIndices();
465 for (const GlobalVariable &GV : M.globals())
466 visitGlobalVariable(GV);
467
468 for (const GlobalAlias &GA : M.aliases())
469 visitGlobalAlias(GA);
470
471 for (const GlobalIFunc &GI : M.ifuncs())
472 visitGlobalIFunc(GI);
473
474 for (const NamedMDNode &NMD : M.named_metadata())
475 visitNamedMDNode(NMD);
476
477 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
478 visitComdat(SMEC.getValue());
479
480 visitModuleFlags();
481 visitModuleIdents();
482 visitModuleCommandLines();
483 visitModuleErrnoTBAA();
484
// Debug-info compile units must all be listed in !llvm.dbg.cu.
485 verifyCompileUnits();
486
487 verifyDeoptimizeCallingConvs();
488 DISubprogramAttachments.clear();
489 return !Broken;
490 }
491
492private:
493 /// Whether a metadata node is allowed to be, or contain, a DILocation.
494 enum class AreDebugLocsAllowed { No, Yes };
495
496 /// Metadata that should be treated as a range, with slightly different
497 /// requirements.
498 enum class RangeLikeMetadataKind {
499 Range, // MD_range
500 AbsoluteSymbol, // MD_absolute_symbol
501 NoaliasAddrspace // MD_noalias_addrspace
502 };
503
504 // Verification methods...
505 void visitGlobalValue(const GlobalValue &GV);
506 void visitGlobalVariable(const GlobalVariable &GV);
507 void visitGlobalAlias(const GlobalAlias &GA);
508 void visitGlobalIFunc(const GlobalIFunc &GI);
509 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
510 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
511 const GlobalAlias &A, const Constant &C);
512 void visitNamedMDNode(const NamedMDNode &NMD);
513 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
514 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
515 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
516 void visitDIArgList(const DIArgList &AL, Function *F);
517 void visitComdat(const Comdat &C);
518 void visitModuleIdents();
519 void visitModuleCommandLines();
520 void visitModuleErrnoTBAA();
521 void visitModuleFlags();
522 void visitModuleFlag(const MDNode *Op,
523 DenseMap<const MDString *, const MDNode *> &SeenIDs,
524 SmallVectorImpl<const MDNode *> &Requirements);
525 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
526 void visitFunction(const Function &F);
527 void visitBasicBlock(BasicBlock &BB);
528 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
529 RangeLikeMetadataKind Kind);
530 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
532 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
533 void visitNofreeMetadata(Instruction &I, MDNode *MD);
534 void visitProfMetadata(Instruction &I, MDNode *MD);
535 void visitCallStackMetadata(MDNode *MD);
536 void visitMemProfMetadata(Instruction &I, MDNode *MD);
537 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
538 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
539 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
540 void visitMMRAMetadata(Instruction &I, MDNode *MD);
541 void visitAnnotationMetadata(MDNode *Annotation);
542 void visitAliasScopeMetadata(const MDNode *MD);
543 void visitAliasScopeListMetadata(const MDNode *MD);
544 void visitAccessGroupMetadata(const MDNode *MD);
545 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
546 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
547
548 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
549#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
550#include "llvm/IR/Metadata.def"
551 void visitDIScope(const DIScope &N);
552 void visitDIVariable(const DIVariable &N);
553 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
554 void visitDITemplateParameter(const DITemplateParameter &N);
555
556 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
557
558 void visit(DbgLabelRecord &DLR);
559 void visit(DbgVariableRecord &DVR);
560 // InstVisitor overrides...
561 using InstVisitor<Verifier>::visit;
562 void visitDbgRecords(Instruction &I);
563 void visit(Instruction &I);
564
565 void visitTruncInst(TruncInst &I);
566 void visitZExtInst(ZExtInst &I);
567 void visitSExtInst(SExtInst &I);
568 void visitFPTruncInst(FPTruncInst &I);
569 void visitFPExtInst(FPExtInst &I);
570 void visitFPToUIInst(FPToUIInst &I);
571 void visitFPToSIInst(FPToSIInst &I);
572 void visitUIToFPInst(UIToFPInst &I);
573 void visitSIToFPInst(SIToFPInst &I);
574 void visitIntToPtrInst(IntToPtrInst &I);
575 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
576 void visitPtrToAddrInst(PtrToAddrInst &I);
577 void visitPtrToIntInst(PtrToIntInst &I);
578 void visitBitCastInst(BitCastInst &I);
579 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
580 void visitPHINode(PHINode &PN);
581 void visitCallBase(CallBase &Call);
582 void visitUnaryOperator(UnaryOperator &U);
583 void visitBinaryOperator(BinaryOperator &B);
584 void visitICmpInst(ICmpInst &IC);
585 void visitFCmpInst(FCmpInst &FC);
586 void visitExtractElementInst(ExtractElementInst &EI);
587 void visitInsertElementInst(InsertElementInst &EI);
588 void visitShuffleVectorInst(ShuffleVectorInst &EI);
589 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
590 void visitCallInst(CallInst &CI);
591 void visitInvokeInst(InvokeInst &II);
592 void visitGetElementPtrInst(GetElementPtrInst &GEP);
593 void visitLoadInst(LoadInst &LI);
594 void visitStoreInst(StoreInst &SI);
595 void verifyDominatesUse(Instruction &I, unsigned i);
596 void visitInstruction(Instruction &I);
597 void visitTerminator(Instruction &I);
598 void visitBranchInst(BranchInst &BI);
599 void visitReturnInst(ReturnInst &RI);
600 void visitSwitchInst(SwitchInst &SI);
601 void visitIndirectBrInst(IndirectBrInst &BI);
602 void visitCallBrInst(CallBrInst &CBI);
603 void visitSelectInst(SelectInst &SI);
604 void visitUserOp1(Instruction &I);
605 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
606 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
607 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
608 void visitVPIntrinsic(VPIntrinsic &VPI);
609 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
610 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
611 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
612 void visitFenceInst(FenceInst &FI);
613 void visitAllocaInst(AllocaInst &AI);
614 void visitExtractValueInst(ExtractValueInst &EVI);
615 void visitInsertValueInst(InsertValueInst &IVI);
616 void visitEHPadPredecessors(Instruction &I);
617 void visitLandingPadInst(LandingPadInst &LPI);
618 void visitResumeInst(ResumeInst &RI);
619 void visitCatchPadInst(CatchPadInst &CPI);
620 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
621 void visitCleanupPadInst(CleanupPadInst &CPI);
622 void visitFuncletPadInst(FuncletPadInst &FPI);
623 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
624 void visitCleanupReturnInst(CleanupReturnInst &CRI);
625
626 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
627 void verifySwiftErrorValue(const Value *SwiftErrorVal);
628 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
629 void verifyMustTailCall(CallInst &CI);
630 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
631 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
632 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
633 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
634 const Value *V);
635 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
636 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
637 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
638 void verifyUnknownProfileMetadata(MDNode *MD);
639 void visitConstantExprsRecursively(const Constant *EntryC);
640 void visitConstantExpr(const ConstantExpr *CE);
641 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
642 void verifyInlineAsmCall(const CallBase &Call);
643 void verifyStatepoint(const CallBase &Call);
644 void verifyFrameRecoverIndices();
645 void verifySiblingFuncletUnwinds();
646
647 void verifyFragmentExpression(const DbgVariableRecord &I);
648 template <typename ValueOrMetadata>
649 void verifyFragmentExpression(const DIVariable &V,
651 ValueOrMetadata *Desc);
652 void verifyFnArgs(const DbgVariableRecord &DVR);
653 void verifyNotEntryValue(const DbgVariableRecord &I);
654
655 /// Module-level debug info verification...
656 void verifyCompileUnits();
657
658 /// Module-level verification that all @llvm.experimental.deoptimize
659 /// declarations share the same calling convention.
660 void verifyDeoptimizeCallingConvs();
661
662 void verifyAttachedCallBundle(const CallBase &Call,
663 const OperandBundleUse &BU);
664
665 /// Verify the llvm.experimental.noalias.scope.decl declarations
666 void verifyNoAliasScopeDecl();
667};
668
669} // end anonymous namespace
670
671/// We know that cond should be true, if not print an error message.
672#define Check(C, ...) \
673 do { \
674 if (!(C)) { \
675 CheckFailed(__VA_ARGS__); \
676 return; \
677 } \
678 } while (false)
679
680/// We know that a debug info condition should be true, if not print
681/// an error message.
682#define CheckDI(C, ...) \
683 do { \
684 if (!(C)) { \
685 DebugInfoCheckFailed(__VA_ARGS__); \
686 return; \
687 } \
688 } while (false)
689
// Check the debug records attached to instruction I: the marker must point
// back at I, PHIs may carry no records, and each record's own marker and
// debug location are validated before dispatching to the per-kind visitor.
// NOTE(review): original line 701 is missing from this listing — it is the
// condition of the `if (auto *Loc =` below (presumably a dyn_cast of the
// record's DebugLoc to an MDNode); confirm against the original file.
690void Verifier::visitDbgRecords(Instruction &I) {
691 if (!I.DebugMarker)
692 return;
693 CheckDI(I.DebugMarker->MarkedInstr == &I,
694 "Instruction has invalid DebugMarker", &I);
695 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
696 "PHI Node must not have any attached DbgRecords", &I);
697 for (DbgRecord &DR : I.getDbgRecordRange()) {
698 CheckDI(DR.getMarker() == I.DebugMarker,
699 "DbgRecord had invalid DebugMarker", &I, &DR);
700 if (auto *Loc =
702 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
703 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
704 visit(*DVR);
705 // These have to appear after `visit` for consistency with existing
706 // intrinsic behaviour.
707 verifyFragmentExpression(*DVR);
708 verifyNotEntryValue(*DVR);
709 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
710 visit(*DLR);
711 }
712 }
713}
714
// Per-instruction entry point: validate attached debug records, then check
// that no operand slot is null.
// NOTE(review): original line 719 is missing from this listing — upstream it
// dispatches into the base visitor (`InstVisitor<Verifier>::visit(I);`);
// confirm against the original file.
715void Verifier::visit(Instruction &I) {
716 visitDbgRecords(I);
717 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
718 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
720}
721
// NOTE(review): this listing is damaged — original line 724 (the `Visited`
// set parameter) and line 729 (the worklist's initialization from the value's
// materialized users) were dropped by the extraction; confirm against the
// original file.
722// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
723static void forEachUser(const Value *User,
725 llvm::function_ref<bool(const Value *)> Callback) {
// The Visited set is shared across calls so a user reachable through many
// operands is processed only once.
726 if (!Visited.insert(User).second)
727 return;
728
730 while (!WorkList.empty()) {
731 const Value *Cur = WorkList.pop_back_val();
732 if (!Visited.insert(Cur).second)
733 continue;
// A true return from the callback means "keep descending" into Cur's users.
734 if (Callback(Cur))
735 append_range(WorkList, Cur->materialized_users());
736 }
737}
738
// Common checks for every GlobalValue: linkage/visibility consistency,
// !associated and !absolute_symbol metadata, appending-linkage rules,
// dllimport/dllexport constraints, dso_local implications, and finally that
// every user of the global lives in this same module.
// NOTE(review): this listing is damaged — the opening line of several
// `Check(` calls was dropped by the extraction (original lines 740, 775,
// 788, 793, 799-800); the orphaned message/argument lines below belong to
// those checks. Confirm against the original file.
739void Verifier::visitGlobalValue(const GlobalValue &GV) {
741 "Global is external, but doesn't have external or weak linkage!", &GV);
742
743 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
744 if (const MDNode *Associated =
745 GO->getMetadata(LLVMContext::MD_associated)) {
746 Check(Associated->getNumOperands() == 1,
747 "associated metadata must have one operand", &GV, Associated);
748 const Metadata *Op = Associated->getOperand(0).get();
749 Check(Op, "associated metadata must have a global value", GO, Associated);
750
751 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
752 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
753 if (VM) {
754 Check(isa<PointerType>(VM->getValue()->getType()),
755 "associated value must be pointer typed", GV, Associated);
756
757 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
758 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
759 "associated metadata must point to a GlobalObject", GO, Stripped);
760 Check(Stripped != GO,
761 "global values should not associate to themselves", GO,
762 Associated);
763 }
764 }
765
766 // FIXME: Why is getMetadata on GlobalValue protected?
767 if (const MDNode *AbsoluteSymbol =
768 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
769 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
770 DL.getIntPtrType(GO->getType()),
771 RangeLikeMetadataKind::AbsoluteSymbol);
772 }
773 }
774
776 "Only global variables can have appending linkage!", &GV);
777
778 if (GV.hasAppendingLinkage()) {
779 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
780 Check(GVar && GVar->getValueType()->isArrayTy(),
781 "Only global arrays can have appending linkage!", GVar);
782 }
783
784 if (GV.isDeclarationForLinker())
785 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
786
787 if (GV.hasDLLExportStorageClass()) {
789 "dllexport GlobalValue must have default or protected visibility",
790 &GV);
791 }
792 if (GV.hasDLLImportStorageClass()) {
794 "dllimport GlobalValue must have default visibility", &GV);
795 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
796 &GV);
797
798 Check((GV.isDeclaration() &&
801 "Global is marked as dllimport, but not external", &GV);
802 }
803
804 if (GV.isImplicitDSOLocal())
805 Check(GV.isDSOLocal(),
806 "GlobalValue with local linkage or non-default "
807 "visibility must be dso_local!",
808 &GV);
809
// Walk all (possibly indirect) users; instructions and functions must
// belong to this module, and traversal stops at them (returns false).
810 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
811 if (const Instruction *I = dyn_cast<Instruction>(V)) {
812 if (!I->getParent() || !I->getParent()->getParent())
813 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
814 I);
815 else if (I->getParent()->getParent()->getParent() != &M)
816 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
817 I->getParent()->getParent(),
818 I->getParent()->getParent()->getParent());
819 return false;
820 } else if (const Function *F = dyn_cast<Function>(V)) {
821 if (F->getParent() != &M)
822 CheckFailed("Global is used by function in a different module", &GV, &M,
823 F, F->getParent());
824 return false;
825 }
826 return true;
827 });
828}
829
// Checks specific to global variables: alignment bounds, initializer type
// and linkage rules, the special llvm.global_ctors/dtors and
// llvm.used/llvm.compiler.used layouts, !dbg attachments, and bans on
// scalable-vector / disallowed target-extension element types. Ends by
// running the common GlobalValue checks.
// NOTE(review): this listing is damaged — the opening line of several
// `Check(` calls and one local declaration were dropped by the extraction
// (original lines 843, 849, 859, 861, 885, 887, 900-901, 911, 926-927); the
// orphaned message/argument lines below belong to those statements. Confirm
// against the original file.
830void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
831 Type *GVType = GV.getValueType();
832
833 if (MaybeAlign A = GV.getAlign()) {
834 Check(A->value() <= Value::MaximumAlignment,
835 "huge alignment values are unsupported", &GV);
836 }
837
838 if (GV.hasInitializer()) {
839 Check(GV.getInitializer()->getType() == GVType,
840 "Global variable initializer type does not match global "
841 "variable type!",
842 &GV);
844 "Global variable initializer must be sized", &GV);
845 visitConstantExprsRecursively(GV.getInitializer());
846 // If the global has common linkage, it must have a zero initializer and
847 // cannot be constant.
848 if (GV.hasCommonLinkage()) {
850 "'common' global must have a zero initializer!", &GV);
851 Check(!GV.isConstant(), "'common' global may not be marked constant!",
852 &GV);
853 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
854 }
855 }
856
// llvm.global_ctors / llvm.global_dtors must be arrays of
// { i32 priority, ptr function, ptr associated-data }.
857 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
858 GV.getName() == "llvm.global_dtors")) {
860 "invalid linkage for intrinsic global variable", &GV);
862 "invalid uses of intrinsic global variable", &GV);
863
864 // Don't worry about emitting an error for it not being an array,
865 // visitGlobalValue will complain on appending non-array.
866 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
867 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
868 PointerType *FuncPtrTy =
869 PointerType::get(Context, DL.getProgramAddressSpace());
870 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
871 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
872 STy->getTypeAtIndex(1) == FuncPtrTy,
873 "wrong type for intrinsic global variable", &GV);
874 Check(STy->getNumElements() == 3,
875 "the third field of the element type is mandatory, "
876 "specify ptr null to migrate from the obsoleted 2-field form");
877 Type *ETy = STy->getTypeAtIndex(2);
878 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
879 &GV);
880 }
881 }
882
// llvm.used / llvm.compiler.used must be arrays of named pointers.
883 if (GV.hasName() && (GV.getName() == "llvm.used" ||
884 GV.getName() == "llvm.compiler.used")) {
886 "invalid linkage for intrinsic global variable", &GV);
888 "invalid uses of intrinsic global variable", &GV);
889
890 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
891 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
892 Check(PTy, "wrong type for intrinsic global variable", &GV);
893 if (GV.hasInitializer()) {
894 const Constant *Init = GV.getInitializer();
895 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
896 Check(InitArray, "wrong initalizer for intrinsic global variable",
897 Init);
898 for (Value *Op : InitArray->operands()) {
899 Value *V = Op->stripPointerCasts();
902 Twine("invalid ") + GV.getName() + " member", V);
903 Check(V->hasName(),
904 Twine("members of ") + GV.getName() + " must be named", V);
905 }
906 }
907 }
908 }
909
910 // Visit any debug info attachments.
912 GV.getMetadata(LLVMContext::MD_dbg, MDs);
913 for (auto *MD : MDs) {
914 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
915 visitDIGlobalVariableExpression(*GVE);
916 else
917 CheckDI(false, "!dbg attachment of global variable must be a "
918 "DIGlobalVariableExpression");
919 }
920
921 // Scalable vectors cannot be global variables, since we don't know
922 // the runtime size.
923 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
924
925 // Check if it is or contains a target extension type that disallows being
926 // used as a global.
928 "Global @" + GV.getName() + " has illegal target extension type",
929 GVType);
930
931 if (!GV.hasInitializer()) {
932 visitGlobalValue(GV);
933 return;
934 }
935
936 // Walk any aggregate initializers looking for bitcasts between address spaces
937 visitConstantExprsRecursively(GV.getInitializer());
938
939 visitGlobalValue(GV);
940}
941
942void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
943 SmallPtrSet<const GlobalAlias*, 4> Visited;
944 Visited.insert(&GA);
945 visitAliaseeSubExpr(Visited, GA, C);
946}
947
// Recursive worker for alias verification: walks the aliasee expression,
// checking linkage compatibility and rejecting alias cycles and interposable
// targets. `Visited` carries the aliases already seen for cycle detection.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  // NOTE(review): the guard and Check opening this diagnostic are elided in
  // this excerpt; confirm against upstream.
        cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    // NOTE(review): an enclosing condition is elided in this excerpt.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Re-inserting an already-seen alias means the chain loops.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse into every operand, following nested aliases to their aliasees.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
986
// Verify a global alias: acceptable linkage, a non-null aliasee whose type
// matches the alias, and a well-formed aliasee expression.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  // NOTE(review): the linkage-predicate Check opening this diagnostic is
  // elided in this excerpt; confirm against upstream.
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively verify the aliasee expression (cycles, interposable targets).
  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
1004
// Verify a global ifunc: forbidden metadata kinds, acceptable linkage, and a
// resolver that is a defined Function returning a pointer in the ifunc's
// address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  // NOTE(review): the declaration of `MDs` is elided in this excerpt.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // Debug-info and profile metadata are not meaningful on ifuncs.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

  // NOTE(review): the linkage-predicate condition is elided in this excerpt.
          "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
          "weak_odr, or external linkage!",
          &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

  // NOTE(review): the pointer-type condition is elided in this excerpt.
        "IFunc resolver must return a pointer", &GI);

  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1039
1040void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1041 // There used to be various other llvm.dbg.* nodes, but we don't support
1042 // upgrading them and we want to reserve the namespace for future uses.
1043 if (NMD.getName().starts_with("llvm.dbg."))
1044 CheckDI(NMD.getName() == "llvm.dbg.cu",
1045 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1046 for (const MDNode *MD : NMD.operands()) {
1047 if (NMD.getName() == "llvm.dbg.cu")
1048 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1049
1050 if (!MD)
1051 continue;
1052
1053 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1054 }
1055}
1056
// Verify an MDNode and, transitively, everything reachable from it. Each node
// is visited at most once via the MDNodes set.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each specialized MDNode subclass;
  // plain tuples need no subclass-specific checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata may not appear in global metadata graphs.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
      // NOTE(review): the rest of this condition and the extraction of
      // `Count` are elided in this excerpt; confirm against upstream.
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1111
1112void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1113 Check(MD.getValue(), "Expected valid value", &MD);
1114 Check(!MD.getValue()->getType()->isMetadataTy(),
1115 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1116
1117 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1118 if (!L)
1119 return;
1120
1121 Check(F, "function-local metadata used outside a function", L);
1122
1123 // If this was an instruction, bb, or argument, verify that it is in the
1124 // function that we expect.
1125 Function *ActualF = nullptr;
1126 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1127 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1128 ActualF = I->getParent()->getParent();
1129 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1130 ActualF = BB->getParent();
1131 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1132 ActualF = A->getParent();
1133 assert(ActualF && "Unimplemented function local metadata case!");
1134
1135 Check(ActualF == F, "function-local metadata used in wrong function", L);
1136}
1137
1138void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1139 for (const ValueAsMetadata *VAM : AL.getArgs())
1140 visitValueAsMetadata(*VAM, F);
1141}
1142
1143void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1144 Metadata *MD = MDV.getMetadata();
1145 if (auto *N = dyn_cast<MDNode>(MD)) {
1146 visitMDNode(*N, AreDebugLocsAllowed::No);
1147 return;
1148 }
1149
1150 // Only visit each node once. Metadata can be mutually recursive, so this
1151 // avoids infinite recursion here, as well as being an optimization.
1152 if (!MDNodes.insert(MD).second)
1153 return;
1154
1155 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1156 visitValueAsMetadata(*V, F);
1157
1158 if (auto *AL = dyn_cast<DIArgList>(MD))
1159 visitDIArgList(*AL, F);
1160}
1161
// Reference-validation helpers: a null operand is accepted (the field is
// optional); otherwise the operand must be of the named debug-info class.
static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1165
1166void Verifier::visitDILocation(const DILocation &N) {
1167 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1168 "location requires a valid scope", &N, N.getRawScope());
1169 if (auto *IA = N.getRawInlinedAt())
1170 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1171 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1172 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1173}
1174
1175void Verifier::visitGenericDINode(const GenericDINode &N) {
1176 CheckDI(N.getTag(), "invalid tag", &N);
1177}
1178
1179void Verifier::visitDIScope(const DIScope &N) {
1180 if (auto *F = N.getRawFile())
1181 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1182}
1183
// Verify a DISubrangeType: subrange tag, optional base type, and each bound
// (lower/upper/stride/bias) given as a constant, DIVariable, or DIExpression.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition on `Size` is elided in this excerpt;
  // confirm against upstream.
          "SizeInBits must be a constant");
}
1211
// Verify a DISubrange: subrange tag, count XOR upperBound, and each bound
// given as a constant, DIVariable, or DIExpression.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are mutually exclusive ways to bound the range.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // NOTE(review): the leading part of this CheckDI condition (guarding the
  // constant-count case) is elided in this excerpt; confirm against upstream.
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1239
1240void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1241 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1242 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1243 "GenericSubrange can have any one of count or upperBound", &N);
1244 auto *CBound = N.getRawCountNode();
1245 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1246 "Count must be signed constant or DIVariable or DIExpression", &N);
1247 auto *LBound = N.getRawLowerBound();
1248 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1249 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1250 "LowerBound must be signed constant or DIVariable or DIExpression",
1251 &N);
1252 auto *UBound = N.getRawUpperBound();
1253 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1254 "UpperBound must be signed constant or DIVariable or DIExpression",
1255 &N);
1256 auto *Stride = N.getRawStride();
1257 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1258 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1259 "Stride must be signed constant or DIVariable or DIExpression", &N);
1260}
1261
1262void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1263 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1264}
1265
// Verify a DIBasicType: one of base/unspecified/string type tags, plus a
// constant size.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition on `Size` is elided in this excerpt;
  // confirm against upstream.
          "SizeInBits must be a constant");
}
1276
// Verify a DIFixedPointType: it is a base type with a fixed-point encoding;
// rational kinds must not use `factor`, non-rational kinds must not use
// numerator/denominator.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Shared basic-type checks first (tag set, constant size).
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
  // NOTE(review): the kind-validity condition is elided in this excerpt;
  // confirm against upstream.
          "invalid kind", &N);
  // NOTE(review): the rational-kind guard for this check is elided.
              N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
  // NOTE(review): the non-rational-kind guard for this check is elided.
              (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1295
1296void Verifier::visitDIStringType(const DIStringType &N) {
1297 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1298 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1299 &N);
1300}
1301
// Verify a DIDerivedType: one of the single-base-type wrapper tags
// (typedefs, qualifiers, pointers/references, members, inheritance, ...),
// plus tag-specific extra-data, set-type, and address-space rules.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    // For pointer-to-member, extraData holds the class type.
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      // NOTE(review): the dyn_casts producing `Enum`, `Subrange`, and `Basic`
      // are elided in this excerpt; confirm against upstream.
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces only make sense on pointer/reference types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition on `Size` is elided in this excerpt;
  // confirm against upstream.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1363
1364/// Detect mutually exclusive flags.
1365static bool hasConflictingReferenceFlags(unsigned Flags) {
1366 return ((Flags & DINode::FlagLValueReference) &&
1367 (Flags & DINode::FlagRValueReference)) ||
1368 ((Flags & DINode::FlagTypePassByValue) &&
1369 (Flags & DINode::FlagTypePassByReference));
1370}
1371
1372void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1373 auto *Params = dyn_cast<MDTuple>(&RawParams);
1374 CheckDI(Params, "invalid template params", &N, &RawParams);
1375 for (Metadata *Op : Params->operands()) {
1376 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1377 &N, Params, Op);
1378 }
1379}
1380
// Verify a DICompositeType: aggregate tags, element list shape, and the
// tag-specific fields (vector shape, discriminator, Fortran array fields).
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
  // NOTE(review): the reference-flag conflict condition is elided in this
  // excerpt; confirm against upstream.
          "invalid reference flags", &N);
  // Legacy Objective-C blocks flag; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): this message says "DISubprogram" but the node under test is
  // a DICompositeType — looks like a copy-paste; confirm against upstream.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank are Fortran array-type fields.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition on `Size` is elided in this excerpt;
  // confirm against upstream.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1455
// Verify a DISubroutineType: subroutine tag and a type array whose entries
// (return type then parameter types) are all DITypes or null.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  // NOTE(review): the reference-flag conflict condition is elided in this
  // excerpt; confirm against upstream.
          "invalid reference flags", &N);
}
1467
1468void Verifier::visitDIFile(const DIFile &N) {
1469 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1470 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1471 if (Checksum) {
1472 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1473 "invalid checksum kind", &N);
1474 size_t Size;
1475 switch (Checksum->Kind) {
1476 case DIFile::CSK_MD5:
1477 Size = 32;
1478 break;
1479 case DIFile::CSK_SHA1:
1480 Size = 40;
1481 break;
1482 case DIFile::CSK_SHA256:
1483 Size = 64;
1484 break;
1485 }
1486 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1487 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1488 "invalid checksum", &N);
1489 }
1490}
1491
// Verify a DICompileUnit: distinctness, file, emission kind, and that each
// of its list fields (enums, retained types, globals, imports, macros) is an
// MDTuple of the right node kinds. Records the CU for a later cross-check
// against !llvm.dbg.cu.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      // NOTE(review): the cast producing `Enum` is elided in this excerpt;
      // confirm against upstream.
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained subprograms must be declarations, not definitions.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      // NOTE(review): the CheckDI condition on `Op` is elided in this
      // excerpt; confirm against upstream.
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Remember this CU for the module-level cross-check.
  CUVisited.insert(&N);
}
1545
// Verify a DISubprogram: its references (scope, file, type, containing type,
// declaration, retained nodes, thrown types) and the definition-vs-
// declaration invariants (distinctness, compile unit, ODR nesting).
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A referenced declaration must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      // NOTE(review): the CheckDI condition on `Op` is elided in this
      // excerpt; confirm against upstream.
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);
    }
  }
  // NOTE(review): the reference-flag conflict condition is elided in this
  // excerpt; confirm against upstream.
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1610
1611void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1612 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1613 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1614 "invalid local scope", &N, N.getRawScope());
1615 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1616 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1617}
1618
1619void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1620 visitDILexicalBlockBase(N);
1621
1622 CheckDI(N.getLine() || !N.getColumn(),
1623 "cannot have column info without line info", &N);
1624}
1625
1626void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1627 visitDILexicalBlockBase(N);
1628}
1629
1630void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1631 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1632 if (auto *S = N.getRawScope())
1633 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1634 if (auto *S = N.getRawDecl())
1635 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1636}
1637
1638void Verifier::visitDINamespace(const DINamespace &N) {
1639 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1640 if (auto *S = N.getRawScope())
1641 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1642}
1643
1644void Verifier::visitDIMacro(const DIMacro &N) {
1645 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1646 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1647 "invalid macinfo type", &N);
1648 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1649 if (!N.getValue().empty()) {
1650 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1651 }
1652}
1653
1654void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1655 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1656 "invalid macinfo type", &N);
1657 if (auto *F = N.getRawFile())
1658 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1659
1660 if (auto *Array = N.getRawElements()) {
1661 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1662 for (Metadata *Op : N.getElements()->operands()) {
1663 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1664 }
1665 }
1666}
1667
1668void Verifier::visitDIModule(const DIModule &N) {
1669 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1670 CheckDI(!N.getName().empty(), "anonymous module", &N);
1671}
1672
1673void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1674 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1675}
1676
1677void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1678 visitDITemplateParameter(N);
1679
1680 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1681 &N);
1682}
1683
1684void Verifier::visitDITemplateValueParameter(
1685 const DITemplateValueParameter &N) {
1686 visitDITemplateParameter(N);
1687
1688 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1689 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1690 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1691 "invalid tag", &N);
1692}
1693
1694void Verifier::visitDIVariable(const DIVariable &N) {
1695 if (auto *S = N.getRawScope())
1696 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1697 if (auto *F = N.getRawFile())
1698 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1699}
1700
// Verify a DIGlobalVariable: shared variable checks, variable tag, type ref,
// a mandatory type on definitions, and a valid static-member declaration.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
    // NOTE(review): the CheckDI condition on `Member` is elided in this
    // excerpt; confirm against upstream.
            "invalid static data member declaration", &N, Member);
  }
}
1715
1716void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1717 // Checks common to all variables.
1718 visitDIVariable(N);
1719
1720 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1721 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1722 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1723 "local variable requires a valid scope", &N, N.getRawScope());
1724 if (auto Ty = N.getType())
1725 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1726}
1727
1728void Verifier::visitDIAssignID(const DIAssignID &N) {
1729 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1730 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1731}
1732
1733void Verifier::visitDILabel(const DILabel &N) {
1734 if (auto *S = N.getRawScope())
1735 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1736 if (auto *F = N.getRawFile())
1737 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1738
1739 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1740 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1741 "label requires a valid scope", &N, N.getRawScope());
1742}
1743
1744void Verifier::visitDIExpression(const DIExpression &N) {
1745 CheckDI(N.isValid(), "invalid expression", &N);
1746}
1747
1748void Verifier::visitDIGlobalVariableExpression(
1749 const DIGlobalVariableExpression &GVE) {
1750 CheckDI(GVE.getVariable(), "missing variable");
1751 if (auto *Var = GVE.getVariable())
1752 visitDIGlobalVariable(*Var);
1753 if (auto *Expr = GVE.getExpression()) {
1754 visitDIExpression(*Expr);
1755 if (auto Fragment = Expr->getFragmentInfo())
1756 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1757 }
1758}
1759
1760void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1761 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1762 if (auto *T = N.getRawType())
1763 CheckDI(isType(T), "invalid type ref", &N, T);
1764 if (auto *F = N.getRawFile())
1765 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1766}
1767
1768void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1769 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1770 N.getTag() == dwarf::DW_TAG_imported_declaration,
1771 "invalid tag", &N);
1772 if (auto *S = N.getRawScope())
1773 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1774 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1775 N.getRawEntity());
1776}
1777
1778void Verifier::visitComdat(const Comdat &C) {
1779 // In COFF the Module is invalid if the GlobalValue has private linkage.
1780 // Entities with private linkage don't have entries in the symbol table.
1781 if (TT.isOSBinFormatCOFF())
1782 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1783 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1784 GV);
1785}
1786
1787void Verifier::visitModuleIdents() {
1788 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1789 if (!Idents)
1790 return;
1791
1792 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1793 // Scan each llvm.ident entry and make sure that this requirement is met.
1794 for (const MDNode *N : Idents->operands()) {
1795 Check(N->getNumOperands() == 1,
1796 "incorrect number of operands in llvm.ident metadata", N);
1797 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1798 ("invalid value for llvm.ident metadata entry operand"
1799 "(the operand should be a string)"),
1800 N->getOperand(0));
1801 }
1802}
1803
1804void Verifier::visitModuleCommandLines() {
1805 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1806 if (!CommandLines)
1807 return;
1808
1809 // llvm.commandline takes a list of metadata entry. Each entry has only one
1810 // string. Scan each llvm.commandline entry and make sure that this
1811 // requirement is met.
1812 for (const MDNode *N : CommandLines->operands()) {
1813 Check(N->getNumOperands() == 1,
1814 "incorrect number of operands in llvm.commandline metadata", N);
1815 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1816 ("invalid value for llvm.commandline metadata entry operand"
1817 "(the operand should be a string)"),
1818 N->getOperand(0));
1819 }
1820}
1821
1822void Verifier::visitModuleErrnoTBAA() {
1823 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1824 if (!ErrnoTBAA)
1825 return;
1826
1827 Check(ErrnoTBAA->getNumOperands() >= 1,
1828 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1829
1830 for (const MDNode *N : ErrnoTBAA->operands())
1831 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1832}
1833
1834void Verifier::visitModuleFlags() {
1835 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1836 if (!Flags) return;
1837
1838 // Scan each flag, and track the flags and requirements.
1839 DenseMap<const MDString*, const MDNode*> SeenIDs;
1840 SmallVector<const MDNode*, 16> Requirements;
1841 uint64_t PAuthABIPlatform = -1;
1842 uint64_t PAuthABIVersion = -1;
1843 for (const MDNode *MDN : Flags->operands()) {
1844 visitModuleFlag(MDN, SeenIDs, Requirements);
1845 if (MDN->getNumOperands() != 3)
1846 continue;
1847 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1848 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1849 if (const auto *PAP =
1851 PAuthABIPlatform = PAP->getZExtValue();
1852 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1853 if (const auto *PAV =
1855 PAuthABIVersion = PAV->getZExtValue();
1856 }
1857 }
1858 }
1859
1860 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1861 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1862 "'aarch64-elf-pauthabi-version' module flags must be present");
1863
1864 // Validate that the requirements in the module are valid.
1865 for (const MDNode *Requirement : Requirements) {
1866 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1867 const Metadata *ReqValue = Requirement->getOperand(1);
1868
1869 const MDNode *Op = SeenIDs.lookup(Flag);
1870 if (!Op) {
1871 CheckFailed("invalid requirement on flag, flag is not present in module",
1872 Flag);
1873 continue;
1874 }
1875
1876 if (Op->getOperand(2) != ReqValue) {
1877 CheckFailed(("invalid requirement on flag, "
1878 "flag does not have the required value"),
1879 Flag);
1880 continue;
1881 }
1882 }
1883}
1884
1885void
1886Verifier::visitModuleFlag(const MDNode *Op,
1887 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1888 SmallVectorImpl<const MDNode *> &Requirements) {
1889 // Each module flag should have three arguments, the merge behavior (a
1890 // constant int), the flag ID (an MDString), and the value.
1891 Check(Op->getNumOperands() == 3,
1892 "incorrect number of operands in module flag", Op);
1893 Module::ModFlagBehavior MFB;
1894 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1896 "invalid behavior operand in module flag (expected constant integer)",
1897 Op->getOperand(0));
1898 Check(false,
1899 "invalid behavior operand in module flag (unexpected constant)",
1900 Op->getOperand(0));
1901 }
1902 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1903 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1904 Op->getOperand(1));
1905
1906 // Check the values for behaviors with additional requirements.
1907 switch (MFB) {
1908 case Module::Error:
1909 case Module::Warning:
1910 case Module::Override:
1911 // These behavior types accept any value.
1912 break;
1913
1914 case Module::Min: {
1915 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1916 Check(V && V->getValue().isNonNegative(),
1917 "invalid value for 'min' module flag (expected constant non-negative "
1918 "integer)",
1919 Op->getOperand(2));
1920 break;
1921 }
1922
1923 case Module::Max: {
1925 "invalid value for 'max' module flag (expected constant integer)",
1926 Op->getOperand(2));
1927 break;
1928 }
1929
1930 case Module::Require: {
1931 // The value should itself be an MDNode with two operands, a flag ID (an
1932 // MDString), and a value.
1933 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1934 Check(Value && Value->getNumOperands() == 2,
1935 "invalid value for 'require' module flag (expected metadata pair)",
1936 Op->getOperand(2));
1937 Check(isa<MDString>(Value->getOperand(0)),
1938 ("invalid value for 'require' module flag "
1939 "(first value operand should be a string)"),
1940 Value->getOperand(0));
1941
1942 // Append it to the list of requirements, to check once all module flags are
1943 // scanned.
1944 Requirements.push_back(Value);
1945 break;
1946 }
1947
1948 case Module::Append:
1949 case Module::AppendUnique: {
1950 // These behavior types require the operand be an MDNode.
1951 Check(isa<MDNode>(Op->getOperand(2)),
1952 "invalid value for 'append'-type module flag "
1953 "(expected a metadata node)",
1954 Op->getOperand(2));
1955 break;
1956 }
1957 }
1958
1959 // Unless this is a "requires" flag, check the ID is unique.
1960 if (MFB != Module::Require) {
1961 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1962 Check(Inserted,
1963 "module flag identifiers must be unique (or of 'require' type)", ID);
1964 }
1965
1966 if (ID->getString() == "wchar_size") {
1967 ConstantInt *Value
1969 Check(Value, "wchar_size metadata requires constant integer argument");
1970 }
1971
1972 if (ID->getString() == "Linker Options") {
1973 // If the llvm.linker.options named metadata exists, we assume that the
1974 // bitcode reader has upgraded the module flag. Otherwise the flag might
1975 // have been created by a client directly.
1976 Check(M.getNamedMetadata("llvm.linker.options"),
1977 "'Linker Options' named metadata no longer supported");
1978 }
1979
1980 if (ID->getString() == "SemanticInterposition") {
1981 ConstantInt *Value =
1983 Check(Value,
1984 "SemanticInterposition metadata requires constant integer argument");
1985 }
1986
1987 if (ID->getString() == "CG Profile") {
1988 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1989 visitModuleFlagCGProfileEntry(MDO);
1990 }
1991}
1992
1993void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1994 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1995 if (!FuncMDO)
1996 return;
1997 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1998 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1999 "expected a Function or null", FuncMDO);
2000 };
2001 auto Node = dyn_cast_or_null<MDNode>(MDO);
2002 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2003 CheckFunction(Node->getOperand(0));
2004 CheckFunction(Node->getOperand(1));
2005 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2006 Check(Count && Count->getType()->isIntegerTy(),
2007 "expected an integer constant", Node->getOperand(2));
2008}
2009
2010void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
2011 for (Attribute A : Attrs) {
2012
2013 if (A.isStringAttribute()) {
2014#define GET_ATTR_NAMES
2015#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
2016#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
2017 if (A.getKindAsString() == #DISPLAY_NAME) { \
2018 auto V = A.getValueAsString(); \
2019 if (!(V.empty() || V == "true" || V == "false")) \
2020 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
2021 ""); \
2022 }
2023
2024#include "llvm/IR/Attributes.inc"
2025 continue;
2026 }
2027
2028 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
2029 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
2030 V);
2031 return;
2032 }
2033 }
2034}
2035
2036// VerifyParameterAttrs - Check the given attributes for an argument or return
2037// value of the specified type. The value V is printed in error messages.
2038void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2039 const Value *V) {
2040 if (!Attrs.hasAttributes())
2041 return;
2042
2043 verifyAttributeTypes(Attrs, V);
2044
2045 for (Attribute Attr : Attrs)
2046 Check(Attr.isStringAttribute() ||
2047 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2048 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2049 V);
2050
2051 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2052 unsigned AttrCount =
2053 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2054 Check(AttrCount == 1,
2055 "Attribute 'immarg' is incompatible with other attributes except the "
2056 "'range' attribute",
2057 V);
2058 }
2059
2060 // Check for mutually incompatible attributes. Only inreg is compatible with
2061 // sret.
2062 unsigned AttrCount = 0;
2063 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2064 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2065 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2066 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2067 Attrs.hasAttribute(Attribute::InReg);
2068 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2069 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2070 Check(AttrCount <= 1,
2071 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2072 "'byref', and 'sret' are incompatible!",
2073 V);
2074
2075 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2076 Attrs.hasAttribute(Attribute::ReadOnly)),
2077 "Attributes "
2078 "'inalloca and readonly' are incompatible!",
2079 V);
2080
2081 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2082 Attrs.hasAttribute(Attribute::Returned)),
2083 "Attributes "
2084 "'sret and returned' are incompatible!",
2085 V);
2086
2087 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2088 Attrs.hasAttribute(Attribute::SExt)),
2089 "Attributes "
2090 "'zeroext and signext' are incompatible!",
2091 V);
2092
2093 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2094 Attrs.hasAttribute(Attribute::ReadOnly)),
2095 "Attributes "
2096 "'readnone and readonly' are incompatible!",
2097 V);
2098
2099 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2100 Attrs.hasAttribute(Attribute::WriteOnly)),
2101 "Attributes "
2102 "'readnone and writeonly' are incompatible!",
2103 V);
2104
2105 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2106 Attrs.hasAttribute(Attribute::WriteOnly)),
2107 "Attributes "
2108 "'readonly and writeonly' are incompatible!",
2109 V);
2110
2111 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2112 Attrs.hasAttribute(Attribute::AlwaysInline)),
2113 "Attributes "
2114 "'noinline and alwaysinline' are incompatible!",
2115 V);
2116
2117 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2118 Attrs.hasAttribute(Attribute::ReadNone)),
2119 "Attributes writable and readnone are incompatible!", V);
2120
2121 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2122 Attrs.hasAttribute(Attribute::ReadOnly)),
2123 "Attributes writable and readonly are incompatible!", V);
2124
2125 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2126 for (Attribute Attr : Attrs) {
2127 if (!Attr.isStringAttribute() &&
2128 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2129 CheckFailed("Attribute '" + Attr.getAsString() +
2130 "' applied to incompatible type!", V);
2131 return;
2132 }
2133 }
2134
2135 if (isa<PointerType>(Ty)) {
2136 if (Attrs.hasAttribute(Attribute::Alignment)) {
2137 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2138 Check(AttrAlign.value() <= Value::MaximumAlignment,
2139 "huge alignment values are unsupported", V);
2140 }
2141 if (Attrs.hasAttribute(Attribute::ByVal)) {
2142 Type *ByValTy = Attrs.getByValType();
2143 SmallPtrSet<Type *, 4> Visited;
2144 Check(ByValTy->isSized(&Visited),
2145 "Attribute 'byval' does not support unsized types!", V);
2146 // Check if it is or contains a target extension type that disallows being
2147 // used on the stack.
2149 "'byval' argument has illegal target extension type", V);
2150 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2151 "huge 'byval' arguments are unsupported", V);
2152 }
2153 if (Attrs.hasAttribute(Attribute::ByRef)) {
2154 SmallPtrSet<Type *, 4> Visited;
2155 Check(Attrs.getByRefType()->isSized(&Visited),
2156 "Attribute 'byref' does not support unsized types!", V);
2157 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2158 (1ULL << 32),
2159 "huge 'byref' arguments are unsupported", V);
2160 }
2161 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2162 SmallPtrSet<Type *, 4> Visited;
2163 Check(Attrs.getInAllocaType()->isSized(&Visited),
2164 "Attribute 'inalloca' does not support unsized types!", V);
2165 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2166 (1ULL << 32),
2167 "huge 'inalloca' arguments are unsupported", V);
2168 }
2169 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2170 SmallPtrSet<Type *, 4> Visited;
2171 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2172 "Attribute 'preallocated' does not support unsized types!", V);
2173 Check(
2174 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2175 (1ULL << 32),
2176 "huge 'preallocated' arguments are unsupported", V);
2177 }
2178 }
2179
2180 if (Attrs.hasAttribute(Attribute::Initializes)) {
2181 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2182 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2183 V);
2185 "Attribute 'initializes' does not support unordered ranges", V);
2186 }
2187
2188 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2189 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2190 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2191 V);
2192 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2193 "Invalid value for 'nofpclass' test mask", V);
2194 }
2195 if (Attrs.hasAttribute(Attribute::Range)) {
2196 const ConstantRange &CR =
2197 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2199 "Range bit width must match type bit width!", V);
2200 }
2201}
2202
2203void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2204 const Value *V) {
2205 if (Attrs.hasFnAttr(Attr)) {
2206 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2207 unsigned N;
2208 if (S.getAsInteger(10, N))
2209 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2210 }
2211}
2212
2213// Check parameter attributes against a function type.
2214// The value V is printed in error messages.
2215void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2216 const Value *V, bool IsIntrinsic,
2217 bool IsInlineAsm) {
2218 if (Attrs.isEmpty())
2219 return;
2220
2221 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2222 Check(Attrs.hasParentContext(Context),
2223 "Attribute list does not match Module context!", &Attrs, V);
2224 for (const auto &AttrSet : Attrs) {
2225 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2226 "Attribute set does not match Module context!", &AttrSet, V);
2227 for (const auto &A : AttrSet) {
2228 Check(A.hasParentContext(Context),
2229 "Attribute does not match Module context!", &A, V);
2230 }
2231 }
2232 }
2233
2234 bool SawNest = false;
2235 bool SawReturned = false;
2236 bool SawSRet = false;
2237 bool SawSwiftSelf = false;
2238 bool SawSwiftAsync = false;
2239 bool SawSwiftError = false;
2240
2241 // Verify return value attributes.
2242 AttributeSet RetAttrs = Attrs.getRetAttrs();
2243 for (Attribute RetAttr : RetAttrs)
2244 Check(RetAttr.isStringAttribute() ||
2245 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2246 "Attribute '" + RetAttr.getAsString() +
2247 "' does not apply to function return values",
2248 V);
2249
2250 unsigned MaxParameterWidth = 0;
2251 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2252 if (Ty->isVectorTy()) {
2253 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2254 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2255 if (Size > MaxParameterWidth)
2256 MaxParameterWidth = Size;
2257 }
2258 }
2259 };
2260 GetMaxParameterWidth(FT->getReturnType());
2261 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2262
2263 // Verify parameter attributes.
2264 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2265 Type *Ty = FT->getParamType(i);
2266 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2267
2268 if (!IsIntrinsic) {
2269 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2270 "immarg attribute only applies to intrinsics", V);
2271 if (!IsInlineAsm)
2272 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2273 "Attribute 'elementtype' can only be applied to intrinsics"
2274 " and inline asm.",
2275 V);
2276 }
2277
2278 verifyParameterAttrs(ArgAttrs, Ty, V);
2279 GetMaxParameterWidth(Ty);
2280
2281 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2282 Check(!SawNest, "More than one parameter has attribute nest!", V);
2283 SawNest = true;
2284 }
2285
2286 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2287 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2288 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2289 "Incompatible argument and return types for 'returned' attribute",
2290 V);
2291 SawReturned = true;
2292 }
2293
2294 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2295 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2296 Check(i == 0 || i == 1,
2297 "Attribute 'sret' is not on first or second parameter!", V);
2298 SawSRet = true;
2299 }
2300
2301 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2302 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2303 SawSwiftSelf = true;
2304 }
2305
2306 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2307 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2308 SawSwiftAsync = true;
2309 }
2310
2311 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2312 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2313 SawSwiftError = true;
2314 }
2315
2316 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2317 Check(i == FT->getNumParams() - 1,
2318 "inalloca isn't on the last parameter!", V);
2319 }
2320 }
2321
2322 if (!Attrs.hasFnAttrs())
2323 return;
2324
2325 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2326 for (Attribute FnAttr : Attrs.getFnAttrs())
2327 Check(FnAttr.isStringAttribute() ||
2328 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2329 "Attribute '" + FnAttr.getAsString() +
2330 "' does not apply to functions!",
2331 V);
2332
2333 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2334 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2335 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2336
2337 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2338 Check(Attrs.hasFnAttr(Attribute::NoInline),
2339 "Attribute 'optnone' requires 'noinline'!", V);
2340
2341 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2342 "Attributes 'optsize and optnone' are incompatible!", V);
2343
2344 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2345 "Attributes 'minsize and optnone' are incompatible!", V);
2346
2347 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2348 "Attributes 'optdebug and optnone' are incompatible!", V);
2349 }
2350
2351 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2352 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2353 "Attributes "
2354 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2355 V);
2356
2357 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2358 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2359 "Attributes 'optsize and optdebug' are incompatible!", V);
2360
2361 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2362 "Attributes 'minsize and optdebug' are incompatible!", V);
2363 }
2364
2365 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2366 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2367 "Attribute writable and memory without argmem: write are incompatible!",
2368 V);
2369
2370 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2371 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2372 "Attributes 'aarch64_pstate_sm_enabled and "
2373 "aarch64_pstate_sm_compatible' are incompatible!",
2374 V);
2375 }
2376
2377 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2378 Attrs.hasFnAttr("aarch64_inout_za") +
2379 Attrs.hasFnAttr("aarch64_out_za") +
2380 Attrs.hasFnAttr("aarch64_preserves_za") +
2381 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2382 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2383 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2384 "'aarch64_za_state_agnostic' are mutually exclusive",
2385 V);
2386
2387 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2388 Attrs.hasFnAttr("aarch64_in_zt0") +
2389 Attrs.hasFnAttr("aarch64_inout_zt0") +
2390 Attrs.hasFnAttr("aarch64_out_zt0") +
2391 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2392 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2393 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2394 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2395 "'aarch64_za_state_agnostic' are mutually exclusive",
2396 V);
2397
2398 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2399 const GlobalValue *GV = cast<GlobalValue>(V);
2401 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2402 }
2403
2404 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2405 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2406 if (ParamNo >= FT->getNumParams()) {
2407 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2408 return false;
2409 }
2410
2411 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2412 CheckFailed("'allocsize' " + Name +
2413 " argument must refer to an integer parameter",
2414 V);
2415 return false;
2416 }
2417
2418 return true;
2419 };
2420
2421 if (!CheckParam("element size", Args->first))
2422 return;
2423
2424 if (Args->second && !CheckParam("number of elements", *Args->second))
2425 return;
2426 }
2427
2428 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2429 AllocFnKind K = Attrs.getAllocKind();
2431 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2432 if (!is_contained(
2433 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2434 Type))
2435 CheckFailed(
2436 "'allockind()' requires exactly one of alloc, realloc, and free");
2437 if ((Type == AllocFnKind::Free) &&
2438 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2439 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2440 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2441 "or aligned modifiers.");
2442 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2443 if ((K & ZeroedUninit) == ZeroedUninit)
2444 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2445 }
2446
2447 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2448 StringRef S = A.getValueAsString();
2449 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2450 Function *Variant = M.getFunction(S);
2451 if (Variant) {
2452 Attribute Family = Attrs.getFnAttr("alloc-family");
2453 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2454 if (Family.isValid())
2455 Check(VariantFamily.isValid() &&
2456 VariantFamily.getValueAsString() == Family.getValueAsString(),
2457 "'alloc-variant-zeroed' must name a function belonging to the "
2458 "same 'alloc-family'");
2459
2460 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2461 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2462 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2463 "'alloc-variant-zeroed' must name a function with "
2464 "'allockind(\"zeroed\")'");
2465
2466 Check(FT == Variant->getFunctionType(),
2467 "'alloc-variant-zeroed' must name a function with the same "
2468 "signature");
2469 }
2470 }
2471
2472 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2473 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2474 if (VScaleMin == 0)
2475 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2476 else if (!isPowerOf2_32(VScaleMin))
2477 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2478 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2479 if (VScaleMax && VScaleMin > VScaleMax)
2480 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2481 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2482 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2483 }
2484
2485 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2486 StringRef FP = FPAttr.getValueAsString();
2487 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
2488 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2489 }
2490
2491 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2492 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2493 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2494 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2495 .getValueAsString()
2496 .empty(),
2497 "\"patchable-function-entry-section\" must not be empty");
2498 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2499
2500 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2501 StringRef S = A.getValueAsString();
2502 if (S != "none" && S != "all" && S != "non-leaf")
2503 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2504 }
2505
2506 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2507 StringRef S = A.getValueAsString();
2508 if (S != "a_key" && S != "b_key")
2509 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2510 V);
2511 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2512 CheckFailed(
2513 "'sign-return-address-key' present without `sign-return-address`");
2514 }
2515 }
2516
2517 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2518 StringRef S = A.getValueAsString();
2519 if (S != "" && S != "true" && S != "false")
2520 CheckFailed(
2521 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2522 }
2523
2524 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2525 StringRef S = A.getValueAsString();
2526 if (S != "" && S != "true" && S != "false")
2527 CheckFailed(
2528 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2529 }
2530
2531 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2532 StringRef S = A.getValueAsString();
2533 if (S != "" && S != "true" && S != "false")
2534 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2535 V);
2536 }
2537
2538 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2539 StringRef S = A.getValueAsString();
2540 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2541 if (!Info)
2542 CheckFailed("invalid name for a VFABI variant: " + S, V);
2543 }
2544
2545 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2546 StringRef S = A.getValueAsString();
2548 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2549 }
2550
2551 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2552 StringRef S = A.getValueAsString();
2554 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2555 V);
2556 }
2557}
2558void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2559 Check(MD->getNumOperands() == 2,
2560 "'unknown' !prof should have a single additional operand", MD);
2561 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2562 Check(PassName != nullptr,
2563 "'unknown' !prof should have an additional operand of type "
2564 "string");
2565 Check(!PassName->getString().empty(),
2566 "the 'unknown' !prof operand should not be an empty string");
2567}
2568
2569void Verifier::verifyFunctionMetadata(
2570 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2571 for (const auto &Pair : MDs) {
2572 if (Pair.first == LLVMContext::MD_prof) {
2573 MDNode *MD = Pair.second;
2574 Check(MD->getNumOperands() >= 2,
2575 "!prof annotations should have no less than 2 operands", MD);
2576 // We may have functions that are synthesized by the compiler, e.g. in
2577 // WPD, that we can't currently determine the entry count.
2578 if (MD->getOperand(0).equalsStr(
2580 verifyUnknownProfileMetadata(MD);
2581 continue;
2582 }
2583
2584 // Check first operand.
2585 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2586 MD);
2588 "expected string with name of the !prof annotation", MD);
2589 MDString *MDS = cast<MDString>(MD->getOperand(0));
2590 StringRef ProfName = MDS->getString();
2593 "first operand should be 'function_entry_count'"
2594 " or 'synthetic_function_entry_count'",
2595 MD);
2596
2597 // Check second operand.
2598 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2599 MD);
2601 "expected integer argument to function_entry_count", MD);
2602 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2603 MDNode *MD = Pair.second;
2604 Check(MD->getNumOperands() == 1,
2605 "!kcfi_type must have exactly one operand", MD);
2606 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2607 MD);
2609 "expected a constant operand for !kcfi_type", MD);
2610 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2611 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2612 "expected a constant integer operand for !kcfi_type", MD);
2614 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2615 }
2616 }
2617}
2618
2619void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2620 if (!ConstantExprVisited.insert(EntryC).second)
2621 return;
2622
2624 Stack.push_back(EntryC);
2625
2626 while (!Stack.empty()) {
2627 const Constant *C = Stack.pop_back_val();
2628
2629 // Check this constant expression.
2630 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2631 visitConstantExpr(CE);
2632
2633 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2634 visitConstantPtrAuth(CPA);
2635
2636 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2637 // Global Values get visited separately, but we do need to make sure
2638 // that the global value is in the correct module
2639 Check(GV->getParent() == &M, "Referencing global in another module!",
2640 EntryC, &M, GV, GV->getParent());
2641 continue;
2642 }
2643
2644 // Visit all sub-expressions.
2645 for (const Use &U : C->operands()) {
2646 const auto *OpC = dyn_cast<Constant>(U);
2647 if (!OpC)
2648 continue;
2649 if (!ConstantExprVisited.insert(OpC).second)
2650 continue;
2651 Stack.push_back(OpC);
2652 }
2653 }
2654}
2655
2656void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2657 if (CE->getOpcode() == Instruction::BitCast)
2658 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2659 CE->getType()),
2660 "Invalid bitcast", CE);
2661 else if (CE->getOpcode() == Instruction::PtrToAddr)
2662 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2663}
2664
2665void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2666 Check(CPA->getPointer()->getType()->isPointerTy(),
2667 "signed ptrauth constant base pointer must have pointer type");
2668
2669 Check(CPA->getType() == CPA->getPointer()->getType(),
2670 "signed ptrauth constant must have same type as its base pointer");
2671
2672 Check(CPA->getKey()->getBitWidth() == 32,
2673 "signed ptrauth constant key must be i32 constant integer");
2674
2676 "signed ptrauth constant address discriminator must be a pointer");
2677
2678 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2679 "signed ptrauth constant discriminator must be i64 constant integer");
2680}
2681
2682bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2683 // There shouldn't be more attribute sets than there are parameters plus the
2684 // function and return value.
2685 return Attrs.getNumAttrSets() <= Params + 2;
2686}
2687
2688void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2689 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2690 unsigned ArgNo = 0;
2691 unsigned LabelNo = 0;
2692 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2693 if (CI.Type == InlineAsm::isLabel) {
2694 ++LabelNo;
2695 continue;
2696 }
2697
2698 // Only deal with constraints that correspond to call arguments.
2699 if (!CI.hasArg())
2700 continue;
2701
2702 if (CI.isIndirect) {
2703 const Value *Arg = Call.getArgOperand(ArgNo);
2704 Check(Arg->getType()->isPointerTy(),
2705 "Operand for indirect constraint must have pointer type", &Call);
2706
2708 "Operand for indirect constraint must have elementtype attribute",
2709 &Call);
2710 } else {
2711 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2712 "Elementtype attribute can only be applied for indirect "
2713 "constraints",
2714 &Call);
2715 }
2716
2717 ArgNo++;
2718 }
2719
2720 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2721 Check(LabelNo == CallBr->getNumIndirectDests(),
2722 "Number of label constraints does not match number of callbr dests",
2723 &Call);
2724 } else {
2725 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2726 &Call);
2727 }
2728}
2729
2730/// Verify that statepoint intrinsic is well formed.
2731void Verifier::verifyStatepoint(const CallBase &Call) {
2732 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2733
2736 "gc.statepoint must read and write all memory to preserve "
2737 "reordering restrictions required by safepoint semantics",
2738 Call);
2739
2740 const int64_t NumPatchBytes =
2741 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2742 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2743 Check(NumPatchBytes >= 0,
2744 "gc.statepoint number of patchable bytes must be "
2745 "positive",
2746 Call);
2747
2748 Type *TargetElemType = Call.getParamElementType(2);
2749 Check(TargetElemType,
2750 "gc.statepoint callee argument must have elementtype attribute", Call);
2751 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2752 Check(TargetFuncType,
2753 "gc.statepoint callee elementtype must be function type", Call);
2754
2755 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2756 Check(NumCallArgs >= 0,
2757 "gc.statepoint number of arguments to underlying call "
2758 "must be positive",
2759 Call);
2760 const int NumParams = (int)TargetFuncType->getNumParams();
2761 if (TargetFuncType->isVarArg()) {
2762 Check(NumCallArgs >= NumParams,
2763 "gc.statepoint mismatch in number of vararg call args", Call);
2764
2765 // TODO: Remove this limitation
2766 Check(TargetFuncType->getReturnType()->isVoidTy(),
2767 "gc.statepoint doesn't support wrapping non-void "
2768 "vararg functions yet",
2769 Call);
2770 } else
2771 Check(NumCallArgs == NumParams,
2772 "gc.statepoint mismatch in number of call args", Call);
2773
2774 const uint64_t Flags
2775 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2776 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2777 "unknown flag used in gc.statepoint flags argument", Call);
2778
2779 // Verify that the types of the call parameter arguments match
2780 // the type of the wrapped callee.
2781 AttributeList Attrs = Call.getAttributes();
2782 for (int i = 0; i < NumParams; i++) {
2783 Type *ParamType = TargetFuncType->getParamType(i);
2784 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2785 Check(ArgType == ParamType,
2786 "gc.statepoint call argument does not match wrapped "
2787 "function type",
2788 Call);
2789
2790 if (TargetFuncType->isVarArg()) {
2791 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2792 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2793 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2794 }
2795 }
2796
2797 const int EndCallArgsInx = 4 + NumCallArgs;
2798
2799 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2800 Check(isa<ConstantInt>(NumTransitionArgsV),
2801 "gc.statepoint number of transition arguments "
2802 "must be constant integer",
2803 Call);
2804 const int NumTransitionArgs =
2805 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2806 Check(NumTransitionArgs == 0,
2807 "gc.statepoint w/inline transition bundle is deprecated", Call);
2808 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2809
2810 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2811 Check(isa<ConstantInt>(NumDeoptArgsV),
2812 "gc.statepoint number of deoptimization arguments "
2813 "must be constant integer",
2814 Call);
2815 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2816 Check(NumDeoptArgs == 0,
2817 "gc.statepoint w/inline deopt operands is deprecated", Call);
2818
2819 const int ExpectedNumArgs = 7 + NumCallArgs;
2820 Check(ExpectedNumArgs == (int)Call.arg_size(),
2821 "gc.statepoint too many arguments", Call);
2822
2823 // Check that the only uses of this gc.statepoint are gc.result or
2824 // gc.relocate calls which are tied to this statepoint and thus part
2825 // of the same statepoint sequence
2826 for (const User *U : Call.users()) {
2827 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2828 Check(UserCall, "illegal use of statepoint token", Call, U);
2829 if (!UserCall)
2830 continue;
2831 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2832 "gc.result or gc.relocate are the only value uses "
2833 "of a gc.statepoint",
2834 Call, U);
2835 if (isa<GCResultInst>(UserCall)) {
2836 Check(UserCall->getArgOperand(0) == &Call,
2837 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2838 } else if (isa<GCRelocateInst>(Call)) {
2839 Check(UserCall->getArgOperand(0) == &Call,
2840 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2841 }
2842 }
2843
2844 // Note: It is legal for a single derived pointer to be listed multiple
2845 // times. It's non-optimal, but it is legal. It can also happen after
2846 // insertion if we strip a bitcast away.
2847 // Note: It is really tempting to check that each base is relocated and
2848 // that a derived pointer is never reused as a base pointer. This turns
2849 // out to be problematic since optimizations run after safepoint insertion
2850 // can recognize equality properties that the insertion logic doesn't know
2851 // about. See example statepoint.ll in the verifier subdirectory
2852}
2853
2854void Verifier::verifyFrameRecoverIndices() {
2855 for (auto &Counts : FrameEscapeInfo) {
2856 Function *F = Counts.first;
2857 unsigned EscapedObjectCount = Counts.second.first;
2858 unsigned MaxRecoveredIndex = Counts.second.second;
2859 Check(MaxRecoveredIndex <= EscapedObjectCount,
2860 "all indices passed to llvm.localrecover must be less than the "
2861 "number of arguments passed to llvm.localescape in the parent "
2862 "function",
2863 F);
2864 }
2865}
2866
2867static Instruction *getSuccPad(Instruction *Terminator) {
2868 BasicBlock *UnwindDest;
2869 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2870 UnwindDest = II->getUnwindDest();
2871 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2872 UnwindDest = CSI->getUnwindDest();
2873 else
2874 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2875 return &*UnwindDest->getFirstNonPHIIt();
2876}
2877
// Walk the unwind edges between sibling EH funclets and reject cycles: two
// pads that each name the other as their unwind destination could never make
// progress when an exception is thrown.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Pads whose unwind chains were fully walked by an earlier iteration.
  SmallPtrSet<Instruction *, 8> Visited;
  // Pads on the chain currently being walked; revisiting one means a cycle.
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pad and (when distinct) its unwinding terminator so
          // the diagnostic can print the whole cycle.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2920
2921// visitFunction - Verify that a function is ok.
2922//
2923void Verifier::visitFunction(const Function &F) {
2924 visitGlobalValue(F);
2925
2926 // Check function arguments.
2927 FunctionType *FT = F.getFunctionType();
2928 unsigned NumArgs = F.arg_size();
2929
2930 Check(&Context == &F.getContext(),
2931 "Function context does not match Module context!", &F);
2932
2933 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2934 Check(FT->getNumParams() == NumArgs,
2935 "# formal arguments must match # of arguments for function type!", &F,
2936 FT);
2937 Check(F.getReturnType()->isFirstClassType() ||
2938 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2939 "Functions cannot return aggregate values!", &F);
2940
2941 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2942 "Invalid struct return type!", &F);
2943
2944 if (MaybeAlign A = F.getAlign()) {
2945 Check(A->value() <= Value::MaximumAlignment,
2946 "huge alignment values are unsupported", &F);
2947 }
2948
2949 AttributeList Attrs = F.getAttributes();
2950
2951 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2952 "Attribute after last parameter!", &F);
2953
2954 bool IsIntrinsic = F.isIntrinsic();
2955
2956 // Check function attributes.
2957 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2958
2959 // On function declarations/definitions, we do not support the builtin
2960 // attribute. We do not check this in VerifyFunctionAttrs since that is
2961 // checking for Attributes that can/can not ever be on functions.
2962 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2963 "Attribute 'builtin' can only be applied to a callsite.", &F);
2964
2965 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2966 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2967
2968 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2969 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2970
2971 if (Attrs.hasFnAttr(Attribute::Naked))
2972 for (const Argument &Arg : F.args())
2973 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2974
2975 // Check that this function meets the restrictions on this calling convention.
2976 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2977 // restrictions can be lifted.
2978 switch (F.getCallingConv()) {
2979 default:
2980 case CallingConv::C:
2981 break;
2982 case CallingConv::X86_INTR: {
2983 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2984 "Calling convention parameter requires byval", &F);
2985 break;
2986 }
2987 case CallingConv::AMDGPU_KERNEL:
2988 case CallingConv::SPIR_KERNEL:
2989 case CallingConv::AMDGPU_CS_Chain:
2990 case CallingConv::AMDGPU_CS_ChainPreserve:
2991 Check(F.getReturnType()->isVoidTy(),
2992 "Calling convention requires void return type", &F);
2993 [[fallthrough]];
2994 case CallingConv::AMDGPU_VS:
2995 case CallingConv::AMDGPU_HS:
2996 case CallingConv::AMDGPU_GS:
2997 case CallingConv::AMDGPU_PS:
2998 case CallingConv::AMDGPU_CS:
2999 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
3000 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3001 const unsigned StackAS = DL.getAllocaAddrSpace();
3002 unsigned i = 0;
3003 for (const Argument &Arg : F.args()) {
3004 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3005 "Calling convention disallows byval", &F);
3006 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3007 "Calling convention disallows preallocated", &F);
3008 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3009 "Calling convention disallows inalloca", &F);
3010
3011 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3012 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3013 // value here.
3014 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3015 "Calling convention disallows stack byref", &F);
3016 }
3017
3018 ++i;
3019 }
3020 }
3021
3022 [[fallthrough]];
3023 case CallingConv::Fast:
3024 case CallingConv::Cold:
3025 case CallingConv::Intel_OCL_BI:
3026 case CallingConv::PTX_Kernel:
3027 case CallingConv::PTX_Device:
3028 Check(!F.isVarArg(),
3029 "Calling convention does not support varargs or "
3030 "perfect forwarding!",
3031 &F);
3032 break;
3033 case CallingConv::AMDGPU_Gfx_WholeWave:
3034 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3035 "Calling convention requires first argument to be i1", &F);
3036 Check(!F.arg_begin()->hasInRegAttr(),
3037 "Calling convention requires first argument to not be inreg", &F);
3038 Check(!F.isVarArg(),
3039 "Calling convention does not support varargs or "
3040 "perfect forwarding!",
3041 &F);
3042 break;
3043 }
3044
3045 // Check that the argument values match the function type for this function...
3046 unsigned i = 0;
3047 for (const Argument &Arg : F.args()) {
3048 Check(Arg.getType() == FT->getParamType(i),
3049 "Argument value does not match function argument type!", &Arg,
3050 FT->getParamType(i));
3051 Check(Arg.getType()->isFirstClassType(),
3052 "Function arguments must have first-class types!", &Arg);
3053 if (!IsIntrinsic) {
3054 Check(!Arg.getType()->isMetadataTy(),
3055 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3056 Check(!Arg.getType()->isTokenLikeTy(),
3057 "Function takes token but isn't an intrinsic", &Arg, &F);
3058 Check(!Arg.getType()->isX86_AMXTy(),
3059 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3060 }
3061
3062 // Check that swifterror argument is only used by loads and stores.
3063 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3064 verifySwiftErrorValue(&Arg);
3065 }
3066 ++i;
3067 }
3068
3069 if (!IsIntrinsic) {
3070 Check(!F.getReturnType()->isTokenLikeTy(),
3071 "Function returns a token but isn't an intrinsic", &F);
3072 Check(!F.getReturnType()->isX86_AMXTy(),
3073 "Function returns a x86_amx but isn't an intrinsic", &F);
3074 }
3075
3076 // Get the function metadata attachments.
3078 F.getAllMetadata(MDs);
3079 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3080 verifyFunctionMetadata(MDs);
3081
3082 // Check validity of the personality function
3083 if (F.hasPersonalityFn()) {
3084 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3085 if (Per)
3086 Check(Per->getParent() == F.getParent(),
3087 "Referencing personality function in another module!", &F,
3088 F.getParent(), Per, Per->getParent());
3089 }
3090
3091 // EH funclet coloring can be expensive, recompute on-demand
3092 BlockEHFuncletColors.clear();
3093
3094 if (F.isMaterializable()) {
3095 // Function has a body somewhere we can't see.
3096 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3097 MDs.empty() ? nullptr : MDs.front().second);
3098 } else if (F.isDeclaration()) {
3099 for (const auto &I : MDs) {
3100 // This is used for call site debug information.
3101 CheckDI(I.first != LLVMContext::MD_dbg ||
3102 !cast<DISubprogram>(I.second)->isDistinct(),
3103 "function declaration may only have a unique !dbg attachment",
3104 &F);
3105 Check(I.first != LLVMContext::MD_prof,
3106 "function declaration may not have a !prof attachment", &F);
3107
3108 // Verify the metadata itself.
3109 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3110 }
3111 Check(!F.hasPersonalityFn(),
3112 "Function declaration shouldn't have a personality routine", &F);
3113 } else {
3114 // Verify that this function (which has a body) is not named "llvm.*". It
3115 // is not legal to define intrinsics.
3116 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3117
3118 // Check the entry node
3119 const BasicBlock *Entry = &F.getEntryBlock();
3120 Check(pred_empty(Entry),
3121 "Entry block to function must not have predecessors!", Entry);
3122
3123 // The address of the entry block cannot be taken, unless it is dead.
3124 if (Entry->hasAddressTaken()) {
3125 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3126 "blockaddress may not be used with the entry block!", Entry);
3127 }
3128
3129 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3130 NumKCFIAttachments = 0;
3131 // Visit metadata attachments.
3132 for (const auto &I : MDs) {
3133 // Verify that the attachment is legal.
3134 auto AllowLocs = AreDebugLocsAllowed::No;
3135 switch (I.first) {
3136 default:
3137 break;
3138 case LLVMContext::MD_dbg: {
3139 ++NumDebugAttachments;
3140 CheckDI(NumDebugAttachments == 1,
3141 "function must have a single !dbg attachment", &F, I.second);
3142 CheckDI(isa<DISubprogram>(I.second),
3143 "function !dbg attachment must be a subprogram", &F, I.second);
3144 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3145 "function definition may only have a distinct !dbg attachment",
3146 &F);
3147
3148 auto *SP = cast<DISubprogram>(I.second);
3149 const Function *&AttachedTo = DISubprogramAttachments[SP];
3150 CheckDI(!AttachedTo || AttachedTo == &F,
3151 "DISubprogram attached to more than one function", SP, &F);
3152 AttachedTo = &F;
3153 AllowLocs = AreDebugLocsAllowed::Yes;
3154 break;
3155 }
3156 case LLVMContext::MD_prof:
3157 ++NumProfAttachments;
3158 Check(NumProfAttachments == 1,
3159 "function must have a single !prof attachment", &F, I.second);
3160 break;
3161 case LLVMContext::MD_kcfi_type:
3162 ++NumKCFIAttachments;
3163 Check(NumKCFIAttachments == 1,
3164 "function must have a single !kcfi_type attachment", &F,
3165 I.second);
3166 break;
3167 }
3168
3169 // Verify the metadata itself.
3170 visitMDNode(*I.second, AllowLocs);
3171 }
3172 }
3173
3174 // If this function is actually an intrinsic, verify that it is only used in
3175 // direct call/invokes, never having its "address taken".
3176 // Only do this if the module is materialized, otherwise we don't have all the
3177 // uses.
3178 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3179 const User *U;
3180 if (F.hasAddressTaken(&U, false, true, false,
3181 /*IgnoreARCAttachedCall=*/true))
3182 Check(false, "Invalid user of intrinsic instruction!", U);
3183 }
3184
3185 // Check intrinsics' signatures.
3186 switch (F.getIntrinsicID()) {
3187 case Intrinsic::experimental_gc_get_pointer_base: {
3188 FunctionType *FT = F.getFunctionType();
3189 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3190 Check(isa<PointerType>(F.getReturnType()),
3191 "gc.get.pointer.base must return a pointer", F);
3192 Check(FT->getParamType(0) == F.getReturnType(),
3193 "gc.get.pointer.base operand and result must be of the same type", F);
3194 break;
3195 }
3196 case Intrinsic::experimental_gc_get_pointer_offset: {
3197 FunctionType *FT = F.getFunctionType();
3198 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3199 Check(isa<PointerType>(FT->getParamType(0)),
3200 "gc.get.pointer.offset operand must be a pointer", F);
3201 Check(F.getReturnType()->isIntegerTy(),
3202 "gc.get.pointer.offset must return integer", F);
3203 break;
3204 }
3205 }
3206
3207 auto *N = F.getSubprogram();
3208 HasDebugInfo = (N != nullptr);
3209 if (!HasDebugInfo)
3210 return;
3211
3212 // Check that all !dbg attachments lead to back to N.
3213 //
3214 // FIXME: Check this incrementally while visiting !dbg attachments.
3215 // FIXME: Only check when N is the canonical subprogram for F.
3216 SmallPtrSet<const MDNode *, 32> Seen;
3217 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3218 // Be careful about using DILocation here since we might be dealing with
3219 // broken code (this is the Verifier after all).
3220 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3221 if (!DL)
3222 return;
3223 if (!Seen.insert(DL).second)
3224 return;
3225
3226 Metadata *Parent = DL->getRawScope();
3227 CheckDI(Parent && isa<DILocalScope>(Parent),
3228 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3229
3230 DILocalScope *Scope = DL->getInlinedAtScope();
3231 Check(Scope, "Failed to find DILocalScope", DL);
3232
3233 if (!Seen.insert(Scope).second)
3234 return;
3235
3236 DISubprogram *SP = Scope->getSubprogram();
3237
3238 // Scope and SP could be the same MDNode and we don't want to skip
3239 // validation in that case
3240 if ((Scope != SP) && !Seen.insert(SP).second)
3241 return;
3242
3243 CheckDI(SP->describes(&F),
3244 "!dbg attachment points at wrong subprogram for function", N, &F,
3245 &I, DL, Scope, SP);
3246 };
3247 for (auto &BB : F)
3248 for (auto &I : BB) {
3249 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3250 // The llvm.loop annotations also contain two DILocations.
3251 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3252 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3253 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3254 if (BrokenDebugInfo)
3255 return;
3256 }
3257}
3258
3259// verifyBasicBlock - Verify that a basic block is well formed...
3260//
3261void Verifier::visitBasicBlock(BasicBlock &BB) {
3262 InstsInThisBlock.clear();
3263 ConvergenceVerifyHelper.visit(BB);
3264
3265 // Ensure that basic blocks have terminators!
3266 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3267
3268 // Check constraints that this basic block imposes on all of the PHI nodes in
3269 // it.
3270 if (isa<PHINode>(BB.front())) {
3271 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3273 llvm::sort(Preds);
3274 for (const PHINode &PN : BB.phis()) {
3275 Check(PN.getNumIncomingValues() == Preds.size(),
3276 "PHINode should have one entry for each predecessor of its "
3277 "parent basic block!",
3278 &PN);
3279
3280 // Get and sort all incoming values in the PHI node...
3281 Values.clear();
3282 Values.reserve(PN.getNumIncomingValues());
3283 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3284 Values.push_back(
3285 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3286 llvm::sort(Values);
3287
3288 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3289 // Check to make sure that if there is more than one entry for a
3290 // particular basic block in this PHI node, that the incoming values are
3291 // all identical.
3292 //
3293 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3294 Values[i].second == Values[i - 1].second,
3295 "PHI node has multiple entries for the same basic block with "
3296 "different incoming values!",
3297 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3298
3299 // Check to make sure that the predecessors and PHI node entries are
3300 // matched up.
3301 Check(Values[i].first == Preds[i],
3302 "PHI node entries do not match predecessors!", &PN,
3303 Values[i].first, Preds[i]);
3304 }
3305 }
3306 }
3307
3308 // Check that all instructions have their parent pointers set up correctly.
3309 for (auto &I : BB)
3310 {
3311 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3312 }
3313
3314 // Confirm that no issues arise from the debug program.
3315 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3316 &BB);
3317}
3318
3319void Verifier::visitTerminator(Instruction &I) {
3320 // Ensure that terminators only exist at the end of the basic block.
3321 Check(&I == I.getParent()->getTerminator(),
3322 "Terminator found in the middle of a basic block!", I.getParent());
3323 visitInstruction(I);
3324}
3325
3326void Verifier::visitBranchInst(BranchInst &BI) {
3327 if (BI.isConditional()) {
3329 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3330 }
3331 visitTerminator(BI);
3332}
3333
3334void Verifier::visitReturnInst(ReturnInst &RI) {
3335 Function *F = RI.getParent()->getParent();
3336 unsigned N = RI.getNumOperands();
3337 if (F->getReturnType()->isVoidTy())
3338 Check(N == 0,
3339 "Found return instr that returns non-void in Function of void "
3340 "return type!",
3341 &RI, F->getReturnType());
3342 else
3343 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3344 "Function return type does not match operand "
3345 "type of return inst!",
3346 &RI, F->getReturnType());
3347
3348 // Check to make sure that the return value has necessary properties for
3349 // terminators...
3350 visitTerminator(RI);
3351}
3352
3353void Verifier::visitSwitchInst(SwitchInst &SI) {
3354 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3355 // Check to make sure that all of the constants in the switch instruction
3356 // have the same type as the switched-on value.
3357 Type *SwitchTy = SI.getCondition()->getType();
3358 SmallPtrSet<ConstantInt*, 32> Constants;
3359 for (auto &Case : SI.cases()) {
3360 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3361 "Case value is not a constant integer.", &SI);
3362 Check(Case.getCaseValue()->getType() == SwitchTy,
3363 "Switch constants must all be same type as switch value!", &SI);
3364 Check(Constants.insert(Case.getCaseValue()).second,
3365 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3366 }
3367
3368 visitTerminator(SI);
3369}
3370
3371void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3373 "Indirectbr operand must have pointer type!", &BI);
3374 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3376 "Indirectbr destinations must all have pointer type!", &BI);
3377
3378 visitTerminator(BI);
3379}
3380
3381void Verifier::visitCallBrInst(CallBrInst &CBI) {
3382 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3383 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3384 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3385
3386 verifyInlineAsmCall(CBI);
3387 visitTerminator(CBI);
3388}
3389
3390void Verifier::visitSelectInst(SelectInst &SI) {
3391 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3392 SI.getOperand(2)),
3393 "Invalid operands for select instruction!", &SI);
3394
3395 Check(SI.getTrueValue()->getType() == SI.getType(),
3396 "Select values must have same type as select instruction!", &SI);
3397 visitInstruction(SI);
3398}
3399
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1/UserOp2 opcodes are reserved for transient, pass-internal use;
  // any occurrence in IR that reaches the verifier is unconditionally invalid.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3406
3407void Verifier::visitTruncInst(TruncInst &I) {
3408 // Get the source and destination types
3409 Type *SrcTy = I.getOperand(0)->getType();
3410 Type *DestTy = I.getType();
3411
3412 // Get the size of the types in bits, we'll need this later
3413 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3414 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3415
3416 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3417 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3418 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3419 "trunc source and destination must both be a vector or neither", &I);
3420 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3421
3422 visitInstruction(I);
3423}
3424
3425void Verifier::visitZExtInst(ZExtInst &I) {
3426 // Get the source and destination types
3427 Type *SrcTy = I.getOperand(0)->getType();
3428 Type *DestTy = I.getType();
3429
3430 // Get the size of the types in bits, we'll need this later
3431 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3432 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3433 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3434 "zext source and destination must both be a vector or neither", &I);
3435 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3436 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3437
3438 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3439
3440 visitInstruction(I);
3441}
3442
3443void Verifier::visitSExtInst(SExtInst &I) {
3444 // Get the source and destination types
3445 Type *SrcTy = I.getOperand(0)->getType();
3446 Type *DestTy = I.getType();
3447
3448 // Get the size of the types in bits, we'll need this later
3449 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3450 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3451
3452 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3453 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3454 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3455 "sext source and destination must both be a vector or neither", &I);
3456 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3457
3458 visitInstruction(I);
3459}
3460
3461void Verifier::visitFPTruncInst(FPTruncInst &I) {
3462 // Get the source and destination types
3463 Type *SrcTy = I.getOperand(0)->getType();
3464 Type *DestTy = I.getType();
3465 // Get the size of the types in bits, we'll need this later
3466 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3467 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3468
3469 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3470 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3471 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3472 "fptrunc source and destination must both be a vector or neither", &I);
3473 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3474
3475 visitInstruction(I);
3476}
3477
3478void Verifier::visitFPExtInst(FPExtInst &I) {
3479 // Get the source and destination types
3480 Type *SrcTy = I.getOperand(0)->getType();
3481 Type *DestTy = I.getType();
3482
3483 // Get the size of the types in bits, we'll need this later
3484 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3485 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3486
3487 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3488 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3489 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3490 "fpext source and destination must both be a vector or neither", &I);
3491 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3492
3493 visitInstruction(I);
3494}
3495
3496void Verifier::visitUIToFPInst(UIToFPInst &I) {
3497 // Get the source and destination types
3498 Type *SrcTy = I.getOperand(0)->getType();
3499 Type *DestTy = I.getType();
3500
3501 bool SrcVec = SrcTy->isVectorTy();
3502 bool DstVec = DestTy->isVectorTy();
3503
3504 Check(SrcVec == DstVec,
3505 "UIToFP source and dest must both be vector or scalar", &I);
3506 Check(SrcTy->isIntOrIntVectorTy(),
3507 "UIToFP source must be integer or integer vector", &I);
3508 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3509 &I);
3510
3511 if (SrcVec && DstVec)
3512 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3513 cast<VectorType>(DestTy)->getElementCount(),
3514 "UIToFP source and dest vector length mismatch", &I);
3515
3516 visitInstruction(I);
3517}
3518
3519void Verifier::visitSIToFPInst(SIToFPInst &I) {
3520 // Get the source and destination types
3521 Type *SrcTy = I.getOperand(0)->getType();
3522 Type *DestTy = I.getType();
3523
3524 bool SrcVec = SrcTy->isVectorTy();
3525 bool DstVec = DestTy->isVectorTy();
3526
3527 Check(SrcVec == DstVec,
3528 "SIToFP source and dest must both be vector or scalar", &I);
3529 Check(SrcTy->isIntOrIntVectorTy(),
3530 "SIToFP source must be integer or integer vector", &I);
3531 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3532 &I);
3533
3534 if (SrcVec && DstVec)
3535 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3536 cast<VectorType>(DestTy)->getElementCount(),
3537 "SIToFP source and dest vector length mismatch", &I);
3538
3539 visitInstruction(I);
3540}
3541
3542void Verifier::visitFPToUIInst(FPToUIInst &I) {
3543 // Get the source and destination types
3544 Type *SrcTy = I.getOperand(0)->getType();
3545 Type *DestTy = I.getType();
3546
3547 bool SrcVec = SrcTy->isVectorTy();
3548 bool DstVec = DestTy->isVectorTy();
3549
3550 Check(SrcVec == DstVec,
3551 "FPToUI source and dest must both be vector or scalar", &I);
3552 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3553 Check(DestTy->isIntOrIntVectorTy(),
3554 "FPToUI result must be integer or integer vector", &I);
3555
3556 if (SrcVec && DstVec)
3557 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3558 cast<VectorType>(DestTy)->getElementCount(),
3559 "FPToUI source and dest vector length mismatch", &I);
3560
3561 visitInstruction(I);
3562}
3563
3564void Verifier::visitFPToSIInst(FPToSIInst &I) {
3565 // Get the source and destination types
3566 Type *SrcTy = I.getOperand(0)->getType();
3567 Type *DestTy = I.getType();
3568
3569 bool SrcVec = SrcTy->isVectorTy();
3570 bool DstVec = DestTy->isVectorTy();
3571
3572 Check(SrcVec == DstVec,
3573 "FPToSI source and dest must both be vector or scalar", &I);
3574 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3575 Check(DestTy->isIntOrIntVectorTy(),
3576 "FPToSI result must be integer or integer vector", &I);
3577
3578 if (SrcVec && DstVec)
3579 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3580 cast<VectorType>(DestTy)->getElementCount(),
3581 "FPToSI source and dest vector length mismatch", &I);
3582
3583 visitInstruction(I);
3584}
3585
3586void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3587 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3588 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3589 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3590 V);
3591
3592 if (SrcTy->isVectorTy()) {
3593 auto *VSrc = cast<VectorType>(SrcTy);
3594 auto *VDest = cast<VectorType>(DestTy);
3595 Check(VSrc->getElementCount() == VDest->getElementCount(),
3596 "PtrToAddr vector length mismatch", V);
3597 }
3598
3599 Type *AddrTy = DL.getAddressType(SrcTy);
3600 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3601}
3602
3603void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3604 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3605 visitInstruction(I);
3606}
3607
3608void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3609 // Get the source and destination types
3610 Type *SrcTy = I.getOperand(0)->getType();
3611 Type *DestTy = I.getType();
3612
3613 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3614
3615 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3616 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3617 &I);
3618
3619 if (SrcTy->isVectorTy()) {
3620 auto *VSrc = cast<VectorType>(SrcTy);
3621 auto *VDest = cast<VectorType>(DestTy);
3622 Check(VSrc->getElementCount() == VDest->getElementCount(),
3623 "PtrToInt Vector length mismatch", &I);
3624 }
3625
3626 visitInstruction(I);
3627}
3628
3629void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3630 // Get the source and destination types
3631 Type *SrcTy = I.getOperand(0)->getType();
3632 Type *DestTy = I.getType();
3633
3634 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3635 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3636
3637 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3638 &I);
3639 if (SrcTy->isVectorTy()) {
3640 auto *VSrc = cast<VectorType>(SrcTy);
3641 auto *VDest = cast<VectorType>(DestTy);
3642 Check(VSrc->getElementCount() == VDest->getElementCount(),
3643 "IntToPtr Vector length mismatch", &I);
3644 }
3645 visitInstruction(I);
3646}
3647
3648void Verifier::visitBitCastInst(BitCastInst &I) {
3649 Check(
3650 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3651 "Invalid bitcast", &I);
3652 visitInstruction(I);
3653}
3654
/// Verify an addrspacecast: both sides are pointers (or pointer vectors),
/// and vector forms agree on element count.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
  // NOTE(review): the opening Check( line for the following diagnostic is not
  // visible in this chunk of the file.
        "AddrSpaceCast must be between different address spaces", &I);
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3671
/// visitPHINode - Ensure that a PHI node is well formed.
///
/// Checks: PHIs are contiguous at the top of their block, a PHI never has a
/// token-like type, and every incoming value matches the PHI's result type.
/// Predecessor/incoming-entry agreement is checked in visitBasicBlock.
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  // NOTE(review): the second operand of this || (the predecessor-is-a-PHI
  // test) is on a line not visible in this chunk.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3697
/// Verify properties shared by all call sites (call/invoke/callbr):
/// callee type, argument/parameter agreement, attribute legality
/// (speculatable, preallocated, inalloca, swifterror, immarg, varargs
/// attributes), operand bundles, and debug-location requirements.
/// NOTE(review): several lines of this function are elided in this chunk of
/// the file (e.g. the callee-pointer Check, the Callee initializer, and the
/// intrinsic-ID guard); the surrounding logic is annotated where that occurs.
void Verifier::visitCallBase(CallBase &Call) {
      "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // NOTE(review): Callee's initializer is on a line not visible here.
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
      "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  // Intrinsics are exempt: their signatures are checked separately.
  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      // NOTE(review): hasOB's initializer is on a line not visible here.
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

  // NOTE(review): the guard establishing ID (intrinsic-ID test) is on a line
  // not visible here.
    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      // NOTE(review): the "else if" branch for the attachedcall tag is on a
      // line not visible here.
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
  // NOTE(review): the Check( condition lines for this diagnostic are not
  // visible in this chunk.
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4003
4004void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4005 StringRef Context) {
4006 Check(!Attrs.contains(Attribute::InAlloca),
4007 Twine("inalloca attribute not allowed in ") + Context);
4008 Check(!Attrs.contains(Attribute::InReg),
4009 Twine("inreg attribute not allowed in ") + Context);
4010 Check(!Attrs.contains(Attribute::SwiftError),
4011 Twine("swifterror attribute not allowed in ") + Context);
4012 Check(!Attrs.contains(Attribute::Preallocated),
4013 Twine("preallocated attribute not allowed in ") + Context);
4014 Check(!Attrs.contains(Attribute::ByRef),
4015 Twine("byref attribute not allowed in ") + Context);
4016}
4017
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  if (L == R)
    return true;
  // NOTE(review): the declarations of PL/PR (pointer-type views of L and R)
  // are on lines not visible in this chunk.
  if (!PL || !PR)
    return false;
  // Both are pointers: congruent iff the address spaces match.
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4029
4030static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4031 static const Attribute::AttrKind ABIAttrs[] = {
4032 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4033 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4034 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4035 Attribute::ByRef};
4036 AttrBuilder Copy(C);
4037 for (auto AK : ABIAttrs) {
4038 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4039 if (Attr.isValid())
4040 Copy.addAttribute(Attr);
4041 }
4042
4043 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4044 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4045 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4046 Attrs.hasParamAttr(I, Attribute::ByRef)))
4047 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4048 return Copy;
4049}
4050
/// Enforce the LangRef rules for a call marked 'musttail': no inline asm,
/// matching varargs-ness/return types/calling conventions between caller and
/// callee, the call must be immediately followed by (an optional bitcast and)
/// a ret of its result, and ABI-impacting parameter attributes must agree.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  // NOTE(review): the declaration of 'Next' (the instruction after the call)
  // is on a line not visible in this chunk.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
            isa<UndefValue>(Ret->getReturnValue()),
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  // tailcc/swifttailcc use a relaxed rule set: prototypes need not match,
  // but a list of ABI attributes is banned outright.
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4137
4138void Verifier::visitCallInst(CallInst &CI) {
4139 visitCallBase(CI);
4140
4141 if (CI.isMustTailCall())
4142 verifyMustTailCall(CI);
4143}
4144
4145void Verifier::visitInvokeInst(InvokeInst &II) {
4146 visitCallBase(II);
4147
4148 // Verify that the first non-PHI instruction of the unwind destination is an
4149 // exception handling instruction.
4150 Check(
4151 II.getUnwindDest()->isEHPad(),
4152 "The unwind destination does not have an exception handling instruction!",
4153 &II);
4154
4155 visitTerminator(II);
4156}
4157
4158/// visitUnaryOperator - Check the argument to the unary operator.
4159///
4160void Verifier::visitUnaryOperator(UnaryOperator &U) {
4161 Check(U.getType() == U.getOperand(0)->getType(),
4162 "Unary operators must have same type for"
4163 "operands and result!",
4164 &U);
4165
4166 switch (U.getOpcode()) {
4167 // Check that floating-point arithmetic operators are only used with
4168 // floating-point operands.
4169 case Instruction::FNeg:
4170 Check(U.getType()->isFPOrFPVectorTy(),
4171 "FNeg operator only works with float types!", &U);
4172 break;
4173 default:
4174 llvm_unreachable("Unknown UnaryOperator opcode!");
4175 }
4176
4177 visitInstruction(U);
4178}
4179
4180/// visitBinaryOperator - Check that both arguments to the binary operator are
4181/// of the same type!
4182///
4183void Verifier::visitBinaryOperator(BinaryOperator &B) {
4184 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4185 "Both operands to a binary operator are not of the same type!", &B);
4186
4187 switch (B.getOpcode()) {
4188 // Check that integer arithmetic operators are only used with
4189 // integral operands.
4190 case Instruction::Add:
4191 case Instruction::Sub:
4192 case Instruction::Mul:
4193 case Instruction::SDiv:
4194 case Instruction::UDiv:
4195 case Instruction::SRem:
4196 case Instruction::URem:
4197 Check(B.getType()->isIntOrIntVectorTy(),
4198 "Integer arithmetic operators only work with integral types!", &B);
4199 Check(B.getType() == B.getOperand(0)->getType(),
4200 "Integer arithmetic operators must have same type "
4201 "for operands and result!",
4202 &B);
4203 break;
4204 // Check that floating-point arithmetic operators are only used with
4205 // floating-point operands.
4206 case Instruction::FAdd:
4207 case Instruction::FSub:
4208 case Instruction::FMul:
4209 case Instruction::FDiv:
4210 case Instruction::FRem:
4211 Check(B.getType()->isFPOrFPVectorTy(),
4212 "Floating-point arithmetic operators only work with "
4213 "floating-point types!",
4214 &B);
4215 Check(B.getType() == B.getOperand(0)->getType(),
4216 "Floating-point arithmetic operators must have same type "
4217 "for operands and result!",
4218 &B);
4219 break;
4220 // Check that logical operators are only used with integral operands.
4221 case Instruction::And:
4222 case Instruction::Or:
4223 case Instruction::Xor:
4224 Check(B.getType()->isIntOrIntVectorTy(),
4225 "Logical operators only work with integral types!", &B);
4226 Check(B.getType() == B.getOperand(0)->getType(),
4227 "Logical operators must have same type for operands and result!", &B);
4228 break;
4229 case Instruction::Shl:
4230 case Instruction::LShr:
4231 case Instruction::AShr:
4232 Check(B.getType()->isIntOrIntVectorTy(),
4233 "Shifts only work with integral types!", &B);
4234 Check(B.getType() == B.getOperand(0)->getType(),
4235 "Shift return type must be same as operands!", &B);
4236 break;
4237 default:
4238 llvm_unreachable("Unknown BinaryOperator opcode!");
4239 }
4240
4241 visitInstruction(B);
4242}
4243
4244void Verifier::visitICmpInst(ICmpInst &IC) {
4245 // Check that the operands are the same type
4246 Type *Op0Ty = IC.getOperand(0)->getType();
4247 Type *Op1Ty = IC.getOperand(1)->getType();
4248 Check(Op0Ty == Op1Ty,
4249 "Both operands to ICmp instruction are not of the same type!", &IC);
4250 // Check that the operands are the right type
4251 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4252 "Invalid operand types for ICmp instruction", &IC);
4253 // Check that the predicate is valid.
4254 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4255
4256 visitInstruction(IC);
4257}
4258
4259void Verifier::visitFCmpInst(FCmpInst &FC) {
4260 // Check that the operands are the same type
4261 Type *Op0Ty = FC.getOperand(0)->getType();
4262 Type *Op1Ty = FC.getOperand(1)->getType();
4263 Check(Op0Ty == Op1Ty,
4264 "Both operands to FCmp instruction are not of the same type!", &FC);
4265 // Check that the operands are the right type
4266 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4267 &FC);
4268 // Check that the predicate is valid.
4269 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4270
4271 visitInstruction(FC);
4272}
4273
/// Verify an extractelement; operand legality is delegated to the
/// instruction class.
/// NOTE(review): the opening Check( line (the isValidOperands call) is not
/// visible in this chunk of the file.
void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
        "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}
4279
4280void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4281 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4282 IE.getOperand(2)),
4283 "Invalid insertelement operands!", &IE);
4284 visitInstruction(IE);
4285}
4286
/// Verify a shufflevector; operand legality is delegated to the instruction
/// class.
/// NOTE(review): the opening Check( line (the isValidOperands call taking the
/// two vector operands) is not visible in this chunk of the file.
void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
                                     SV.getShuffleMask()),
        "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}
4293
4294void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4295 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4296
4297 Check(isa<PointerType>(TargetTy),
4298 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4299 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4300
4301 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4302 Check(!STy->isScalableTy(),
4303 "getelementptr cannot target structure that contains scalable vector"
4304 "type",
4305 &GEP);
4306 }
4307
4308 SmallVector<Value *, 16> Idxs(GEP.indices());
4309 Check(
4310 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4311 "GEP indexes must be integers", &GEP);
4312 Type *ElTy =
4313 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4314 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4315
4316 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4317
4318 Check(PtrTy && GEP.getResultElementType() == ElTy,
4319 "GEP is not of right type for indices!", &GEP, ElTy);
4320
4321 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4322 // Additional checks for vector GEPs.
4323 ElementCount GEPWidth = GEPVTy->getElementCount();
4324 if (GEP.getPointerOperandType()->isVectorTy())
4325 Check(
4326 GEPWidth ==
4327 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4328 "Vector GEP result width doesn't match operand's", &GEP);
4329 for (Value *Idx : Idxs) {
4330 Type *IndexTy = Idx->getType();
4331 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4332 ElementCount IndexWidth = IndexVTy->getElementCount();
4333 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4334 }
4335 Check(IndexTy->isIntOrIntVectorTy(),
4336 "All GEP indices should be of integer type");
4337 }
4338 }
4339
4340 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4341 "GEP address space doesn't match type", &GEP);
4342
4343 visitInstruction(GEP);
4344}
4345
4346static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4347 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4348}
4349
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
/// The operand list is interpreted as [lo0, hi0, lo1, hi1, ...] pairs; the
/// pairs must be non-empty, non-overlapping, non-contiguous, and in signed
/// order.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address-space numbers are checked as i32 regardless of the access
      // type.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only the AbsoluteSymbol kind is allowed to use the full-set encoding.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two pairs, the first and the last pair are additionally
  // checked against each other for overlap and contiguity.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4414
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  // Callers must pass the instruction's own !range node; the shared
  // range-like verifier does the actual checking.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4420
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  // Callers must pass the instruction's own !noalias.addrspace node; the
  // shared range-like verifier does the actual checking.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4428
4429void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4430 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4431 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4432 Check(!(Size & (Size - 1)),
4433 "atomic memory access' operand must have a power-of-two size", Ty, I);
4434}
4435
// Verify a load: pointer operand, sized result type, bounded alignment, and
// the extra ordering/type/size constraints when the load is atomic.
void Verifier::visitLoadInst(LoadInst &LI) {
  // NOTE(review): the declaration of PTy (taken from the pointer operand's
  // type) appears to be missing from this copy -- confirm against upstream.
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Atomic loads may not carry release semantics.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
          "atomic load operand must have integer, pointer, or floating point "
          "type!",
          ElTy, &LI);
    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
    // NOTE(review): the opening of this Check (presumably testing
    // LI.getSyncScopeID() == SyncScope::System) appears truncated here --
    // confirm against upstream.
          "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4461
4462void Verifier::visitStoreInst(StoreInst &SI) {
4463 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4464 Check(PTy, "Store operand must be a pointer.", &SI);
4465 Type *ElTy = SI.getOperand(0)->getType();
4466 if (MaybeAlign A = SI.getAlign()) {
4467 Check(A->value() <= Value::MaximumAlignment,
4468 "huge alignment values are unsupported", &SI);
4469 }
4470 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4471 if (SI.isAtomic()) {
4472 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4473 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4474 "Store cannot have Acquire ordering", &SI);
4475 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4476 "atomic store operand must have integer, pointer, or floating point "
4477 "type!",
4478 ElTy, &SI);
4479 checkAtomicMemAccessSize(ElTy, &SI);
4480 } else {
4481 Check(SI.getSyncScopeID() == SyncScope::System,
4482 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4483 }
4484 visitInstruction(SI);
4485}
4486
4487/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4488void Verifier::verifySwiftErrorCall(CallBase &Call,
4489 const Value *SwiftErrorVal) {
4490 for (const auto &I : llvm::enumerate(Call.args())) {
4491 if (I.value() == SwiftErrorVal) {
4492 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4493 "swifterror value when used in a callsite should be marked "
4494 "with swifterror attribute",
4495 SwiftErrorVal, Call);
4496 }
4497 }
4498}
4499
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
    // NOTE(review): the first part of this Check's condition appears to be
    // missing from this copy -- confirm against upstream.
        isa<InvokeInst>(U),
        "swifterror value can only be loaded and stored from, or "
        "as a swifterror argument!",
        SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call sites must mark the matching parameter swifterror; that is
    // verified by verifySwiftErrorCall.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4519
// Verify an alloca: sized allocated type, integer array size, bounded
// alignment, plus the swifterror and AMDGPU address-space constraints.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
  // NOTE(review): the opening lines of the next two Check calls appear to be
  // missing from this copy -- confirm against upstream.
        "Alloca has illegal target extension type", &AI);
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
    // NOTE(review): truncated Check line -- confirm against upstream.
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
    // NOTE(review): truncated Check line -- confirm against upstream.
          "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4549
4550void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4551 Type *ElTy = CXI.getOperand(1)->getType();
4552 Check(ElTy->isIntOrPtrTy(),
4553 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4554 checkAtomicMemAccessSize(ElTy, &CXI);
4555 visitInstruction(CXI);
4556}
4557
// Verify an atomicrmw: the ordering must not be unordered, and the value
// operand's type must match the operation family (xchg: int/FP/pointer;
// FP operations: floating point; everything else: integer), with the shared
// atomic access size constraints.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
    // NOTE(review): the opening Check(...) line appears to be missing from
    // this copy -- confirm against upstream.
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have floating-point or fixed vector of floating-point "
              "type!",
          &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
  // NOTE(review): the opening Check(...) line (presumably validating that Op
  // is a known binary operation) appears to be missing here.
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4586
4587void Verifier::visitFenceInst(FenceInst &FI) {
4588 const AtomicOrdering Ordering = FI.getOrdering();
4589 Check(Ordering == AtomicOrdering::Acquire ||
4590 Ordering == AtomicOrdering::Release ||
4591 Ordering == AtomicOrdering::AcquireRelease ||
4592 Ordering == AtomicOrdering::SequentiallyConsistent,
4593 "fence instructions may only have acquire, release, acq_rel, or "
4594 "seq_cst ordering.",
4595 &FI);
4596 visitInstruction(FI);
4597}
4598
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
  // The declared result type must equal the type reached by following the
  // aggregate indices.
  // NOTE(review): the opening Check(ExtractValueInst::getIndexedType(...)
  // line appears to be missing from this copy -- confirm against upstream.
              EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4606
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
  // The inserted value's type must equal the type found at the indexed
  // position within the aggregate (operand 1 is the inserted value).
  // NOTE(review): the opening Check(...getIndexedType(...) line appears to
  // be missing from this copy -- confirm against upstream.
              IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4615
4616static Value *getParentPad(Value *EHPad) {
4617 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4618 return FPI->getParentPad();
4619
4620 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4621}
4622
4623void Verifier::visitEHPadPredecessors(Instruction &I) {
4624 assert(I.isEHPad());
4625
4626 BasicBlock *BB = I.getParent();
4627 Function *F = BB->getParent();
4628
4629 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4630
4631 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4632 // The landingpad instruction defines its parent as a landing pad block. The
4633 // landing pad block may be branched to only by the unwind edge of an
4634 // invoke.
4635 for (BasicBlock *PredBB : predecessors(BB)) {
4636 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4637 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4638 "Block containing LandingPadInst must be jumped to "
4639 "only by the unwind edge of an invoke.",
4640 LPI);
4641 }
4642 return;
4643 }
4644 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4645 if (!pred_empty(BB))
4646 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4647 "Block containg CatchPadInst must be jumped to "
4648 "only by its catchswitch.",
4649 CPI);
4650 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4651 "Catchswitch cannot unwind to one of its catchpads",
4652 CPI->getCatchSwitch(), CPI);
4653 return;
4654 }
4655
4656 // Verify that each pred has a legal terminator with a legal to/from EH
4657 // pad relationship.
4658 Instruction *ToPad = &I;
4659 Value *ToPadParent = getParentPad(ToPad);
4660 for (BasicBlock *PredBB : predecessors(BB)) {
4661 Instruction *TI = PredBB->getTerminator();
4662 Value *FromPad;
4663 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4664 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4665 "EH pad must be jumped to via an unwind edge", ToPad, II);
4666 auto *CalledFn =
4667 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4668 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4669 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4670 continue;
4671 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4672 FromPad = Bundle->Inputs[0];
4673 else
4674 FromPad = ConstantTokenNone::get(II->getContext());
4675 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4676 FromPad = CRI->getOperand(0);
4677 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4678 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4679 FromPad = CSI;
4680 } else {
4681 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4682 }
4683
4684 // The edge may exit from zero or more nested pads.
4685 SmallPtrSet<Value *, 8> Seen;
4686 for (;; FromPad = getParentPad(FromPad)) {
4687 Check(FromPad != ToPad,
4688 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4689 if (FromPad == ToPadParent) {
4690 // This is a legal unwind edge.
4691 break;
4692 }
4693 Check(!isa<ConstantTokenNone>(FromPad),
4694 "A single unwind edge may only enter one EH pad", TI);
4695 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4696 FromPad);
4697
4698 // This will be diagnosed on the corresponding instruction already. We
4699 // need the extra check here to make sure getParentPad() works.
4700 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4701 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4702 }
4703 }
4704}
4705
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads (and resumes) in a function must agree on one result
  // type; the first one seen fixes it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
      // NOTE(review): the opening of this Check (presumably testing that the
      // clause is an array type) appears to be missing from this copy --
      // confirm against upstream.
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4745
void Verifier::visitResumeInst(ResumeInst &RI) {
  // NOTE(review): the opening of this Check (presumably testing that the
  // enclosing function has a personality) appears to be missing from this
  // copy -- confirm against upstream.
        "ResumeInst needs to be in a function with a personality.", &RI);

  // Resume must agree with the function-wide landingpad result type.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4760
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

  // NOTE(review): the opening of this Check (presumably testing that the
  // parent pad is a CatchSwitchInst) appears to be missing from this copy --
  // confirm against upstream.
        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4780
4781void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4782 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4783 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4784 CatchReturn.getOperand(0));
4785
4786 visitTerminator(CatchReturn);
4787}
4788
4789void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4790 BasicBlock *BB = CPI.getParent();
4791
4792 Function *F = BB->getParent();
4793 Check(F->hasPersonalityFn(),
4794 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4795
4796 // The cleanuppad instruction must be the first non-PHI instruction in the
4797 // block.
4798 Check(&*BB->getFirstNonPHIIt() == &CPI,
4799 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4800
4801 auto *ParentPad = CPI.getParentPad();
4802 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4803 "CleanupPadInst has an invalid parent.", &CPI);
4804
4805 visitEHPadPredecessors(CPI);
4806 visitFuncletPadInst(CPI);
4807}
4808
/// Verify a funclet pad (catchpad or cleanuppad): all unwind edges that exit
/// the pad must agree on a single unwind destination, and for a catchpad
/// that destination must match the parent catchswitch's. Nested cleanup
/// pads are searched via a worklist because their unwind destination is only
/// discoverable through their users.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catchpad's agreed unwind destination must match its parent
    // catchswitch's unwind destination (token none when unwinding to caller).
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4968
4969void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4970 BasicBlock *BB = CatchSwitch.getParent();
4971
4972 Function *F = BB->getParent();
4973 Check(F->hasPersonalityFn(),
4974 "CatchSwitchInst needs to be in a function with a personality.",
4975 &CatchSwitch);
4976
4977 // The catchswitch instruction must be the first non-PHI instruction in the
4978 // block.
4979 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
4980 "CatchSwitchInst not the first non-PHI instruction in the block.",
4981 &CatchSwitch);
4982
4983 auto *ParentPad = CatchSwitch.getParentPad();
4984 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4985 "CatchSwitchInst has an invalid parent.", ParentPad);
4986
4987 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4988 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4989 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4990 "CatchSwitchInst must unwind to an EH block which is not a "
4991 "landingpad.",
4992 &CatchSwitch);
4993
4994 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4995 if (getParentPad(&*I) == ParentPad)
4996 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4997 }
4998
4999 Check(CatchSwitch.getNumHandlers() != 0,
5000 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5001
5002 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5003 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5004 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5005 }
5006
5007 visitEHPadPredecessors(CatchSwitch);
5008 visitTerminator(CatchSwitch);
5009}
5010
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // NOTE(review): the opening of this Check (presumably testing that operand
  // 0 is a CleanupPadInst) appears to be missing from this copy -- confirm
  // against upstream.
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    // The unwind successor must begin with a non-landingpad EH pad.
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5026
// Verify that operand \p i of \p I is produced by an instruction that
// dominates this particular use.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  const Use &U = I.getOperandUse(i);
  Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
}
5050
// Verify !dereferenceable / !dereferenceable_or_null metadata: pointer-typed
// result, restricted instruction kinds, and a single constant i64 operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
  // NOTE(review): the opening of this Check (presumably testing for load /
  // inttoptr instruction kinds) appears to be missing from this copy --
  // confirm against upstream.
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The single operand is the guaranteed-dereferenceable size as an i64.
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5070
5071void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5072 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5073 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5074 &I);
5075 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5076}
5077
// Verify !prof metadata: a leading MDString profile name, then either
// branch_weights (operand count matching the instruction's successor
// arrangement) or value-profile ("VP") records.
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // How many weight operands a branching instruction of this kind should
  // carry; 0 means branch_weights are not applicable.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (BranchInst *BI = dyn_cast<BranchInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
  // NOTE(review): the opening of this Check (presumably testing that operand
  // 0 is an MDString) appears to be missing from this copy -- confirm
  // against upstream.
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

  // NOTE(review): the opening `if (ProfName == ...)` line of this branch
  // appears to be missing from this copy -- confirm against upstream.
    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    if (isa<InvokeInst>(&I)) {
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
      // NOTE(review): the opening of this Check appears truncated here, and
      // "brunch_weights" is a typo for "branch_weights" in the diagnostic --
      // confirm against upstream.
            "!prof brunch_weights operand is not a const int");
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
      // NOTE(review): the opening of this Check (presumably testing
      // isa<CallBase>(I)) appears to be missing from this copy -- confirm
      // against upstream.
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5161
5162void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5163 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5164 // DIAssignID metadata must be attached to either an alloca or some form of
5165 // store/memory-writing instruction.
5166 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5167 // possible store intrinsics.
5168 bool ExpectedInstTy =
5170 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5171 I, MD);
5172 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5173 // only be found as DbgAssignIntrinsic operands.
5174 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5175 for (auto *User : AsValue->users()) {
5177 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5178 MD, User);
5179 // All of the dbg.assign intrinsics should be in the same function as I.
5180 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5181 CheckDI(DAI->getFunction() == I.getFunction(),
5182 "dbg.assign not in same function as inst", DAI, &I);
5183 }
5184 }
5185 for (DbgVariableRecord *DVR :
5186 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5187 CheckDI(DVR->isDbgAssign(),
5188 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5189 CheckDI(DVR->getFunction() == I.getFunction(),
5190 "DVRAssign not in same function as inst", DVR, &I);
5191 }
5192}
5193
5194void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5196 "!mmra metadata attached to unexpected instruction kind", I, MD);
5197
5198 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5199 // list of tags such as !2 in the following example:
5200 // !0 = !{!"a", !"b"}
5201 // !1 = !{!"c", !"d"}
5202 // !2 = !{!0, !1}
5203 if (MMRAMetadata::isTagMD(MD))
5204 return;
5205
5206 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5207 for (const MDOperand &MDOp : MD->operands())
5208 Check(MMRAMetadata::isTagMD(MDOp.get()),
5209 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5210}
5211
5212void Verifier::visitCallStackMetadata(MDNode *MD) {
5213 // Call stack metadata should consist of a list of at least 1 constant int
5214 // (representing a hash of the location).
5215 Check(MD->getNumOperands() >= 1,
5216 "call stack metadata should have at least 1 operand", MD);
5217
5218 for (const auto &Op : MD->operands())
5220 "call stack metadata operand should be constant integer", Op);
5221}
5222
5223void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5224 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5225 Check(MD->getNumOperands() >= 1,
5226 "!memprof annotations should have at least 1 metadata operand "
5227 "(MemInfoBlock)",
5228 MD);
5229
5230 // Check each MIB
5231 for (auto &MIBOp : MD->operands()) {
5232 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5233 // The first operand of an MIB should be the call stack metadata.
5234 // There rest of the operands should be MDString tags, and there should be
5235 // at least one.
5236 Check(MIB->getNumOperands() >= 2,
5237 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5238
5239 // Check call stack metadata (first operand).
5240 Check(MIB->getOperand(0) != nullptr,
5241 "!memprof MemInfoBlock first operand should not be null", MIB);
5242 Check(isa<MDNode>(MIB->getOperand(0)),
5243 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5244 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5245 visitCallStackMetadata(StackMD);
5246
5247 // The next set of 1 or more operands should be MDString.
5248 unsigned I = 1;
5249 for (; I < MIB->getNumOperands(); ++I) {
5250 if (!isa<MDString>(MIB->getOperand(I))) {
5251 Check(I > 1,
5252 "!memprof MemInfoBlock second operand should be an MDString",
5253 MIB);
5254 break;
5255 }
5256 }
5257
5258 // Any remaining should be MDNode that are pairs of integers
5259 for (; I < MIB->getNumOperands(); ++I) {
5260 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5261 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5262 MIB);
5263 Check(OpNode->getNumOperands() == 2,
5264 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5265 "operands",
5266 MIB);
5267 // Check that all of Op's operands are ConstantInt.
5268 Check(llvm::all_of(OpNode->operands(),
5269 [](const MDOperand &Op) {
5270 return mdconst::hasa<ConstantInt>(Op);
5271 }),
5272 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5273 "ConstantInt operands",
5274 MIB);
5275 }
5276 }
5277}
5278
5279void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5280 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5281 // Verify the partial callstack annotated from memprof profiles. This callsite
5282 // is a part of a profiled allocation callstack.
5283 visitCallStackMetadata(MD);
5284}
5285
5286static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5287 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5288 return isa<ConstantInt>(VAL->getValue());
5289 return false;
5290}
5291
5292void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5293 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5294 &I);
5295 for (Metadata *Op : MD->operands()) {
5297 "The callee_type metadata must be a list of type metadata nodes", Op);
5298 auto *TypeMD = cast<MDNode>(Op);
5299 Check(TypeMD->getNumOperands() == 2,
5300 "Well-formed generalized type metadata must contain exactly two "
5301 "operands",
5302 Op);
5303 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5304 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5305 "The first operand of type metadata for functions must be zero", Op);
5306 Check(TypeMD->hasGeneralizedMDString(),
5307 "Only generalized type metadata can be part of the callee_type "
5308 "metadata list",
5309 Op);
5310 }
5311}
5312
5313void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5314 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5315 Check(Annotation->getNumOperands() >= 1,
5316 "annotation must have at least one operand");
5317 for (const MDOperand &Op : Annotation->operands()) {
5318 bool TupleOfStrings =
5319 isa<MDTuple>(Op.get()) &&
5320 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5321 return isa<MDString>(Annotation.get());
5322 });
5323 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5324 "operands must be a string or a tuple of strings");
5325 }
5326}
5327
5328void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5329 unsigned NumOps = MD->getNumOperands();
5330 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5331 MD);
5332 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5333 "first scope operand must be self-referential or string", MD);
5334 if (NumOps == 3)
5336 "third scope operand must be string (if used)", MD);
5337
5338 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5339 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5340
5341 unsigned NumDomainOps = Domain->getNumOperands();
5342 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5343 "domain must have one or two operands", Domain);
5344 Check(Domain->getOperand(0).get() == Domain ||
5345 isa<MDString>(Domain->getOperand(0)),
5346 "first domain operand must be self-referential or string", Domain);
5347 if (NumDomainOps == 2)
5348 Check(isa<MDString>(Domain->getOperand(1)),
5349 "second domain operand must be string (if used)", Domain);
5350}
5351
5352void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5353 for (const MDOperand &Op : MD->operands()) {
5354 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5355 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5356 visitAliasScopeMetadata(OpMD);
5357 }
5358}
5359
5360void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5361 auto IsValidAccessScope = [](const MDNode *MD) {
5362 return MD->getNumOperands() == 0 && MD->isDistinct();
5363 };
5364
5365 // It must be either an access scope itself...
5366 if (IsValidAccessScope(MD))
5367 return;
5368
5369 // ...or a list of access scopes.
5370 for (const MDOperand &Op : MD->operands()) {
5371 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5372 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5373 Check(IsValidAccessScope(OpMD),
5374 "Access scope list contains invalid access scope", MD);
5375 }
5376}
5377
5378void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5379 static const char *ValidArgs[] = {"address_is_null", "address",
5380 "read_provenance", "provenance"};
5381
5382 auto *SI = dyn_cast<StoreInst>(&I);
5383 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5384 Check(SI->getValueOperand()->getType()->isPointerTy(),
5385 "!captures metadata can only be applied to store with value operand of "
5386 "pointer type",
5387 &I);
5388 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5389 &I);
5390
5391 for (Metadata *Op : Captures->operands()) {
5392 auto *Str = dyn_cast<MDString>(Op);
5393 Check(Str, "!captures metadata must be a list of strings", &I);
5394 Check(is_contained(ValidArgs, Str->getString()),
5395 "invalid entry in !captures metadata", &I, Str);
5396 }
5397}
5398
5399void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5400 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5401 Check(MD->getNumOperands() == 1, "!alloc_token must have 1 operand", MD);
5402 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5403}
5404
5405/// verifyInstruction - Verify that an instruction is well formed.
5406///
5407void Verifier::visitInstruction(Instruction &I) {
5408 BasicBlock *BB = I.getParent();
5409 Check(BB, "Instruction not embedded in basic block!", &I);
5410
5411 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5412 for (User *U : I.users()) {
5413 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5414 "Only PHI nodes may reference their own value!", &I);
5415 }
5416 }
5417
5418 // Check that void typed values don't have names
5419 Check(!I.getType()->isVoidTy() || !I.hasName(),
5420 "Instruction has a name, but provides a void value!", &I);
5421
5422 // Check that the return value of the instruction is either void or a legal
5423 // value type.
5424 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5425 "Instruction returns a non-scalar type!", &I);
5426
5427 // Check that the instruction doesn't produce metadata. Calls are already
5428 // checked against the callee type.
5429 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5430 "Invalid use of metadata!", &I);
5431
5432 // Check that all uses of the instruction, if they are instructions
5433 // themselves, actually have parent basic blocks. If the use is not an
5434 // instruction, it is an error!
5435 for (Use &U : I.uses()) {
5436 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5437 Check(Used->getParent() != nullptr,
5438 "Instruction referencing"
5439 " instruction not embedded in a basic block!",
5440 &I, Used);
5441 else {
5442 CheckFailed("Use of instruction is not an instruction!", U);
5443 return;
5444 }
5445 }
5446
5447 // Get a pointer to the call base of the instruction if it is some form of
5448 // call.
5449 const CallBase *CBI = dyn_cast<CallBase>(&I);
5450
5451 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5452 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5453
5454 // Check to make sure that only first-class-values are operands to
5455 // instructions.
5456 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5457 Check(false, "Instruction operands must be first-class values!", &I);
5458 }
5459
5460 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5461 // This code checks whether the function is used as the operand of a
5462 // clang_arc_attachedcall operand bundle.
5463 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5464 int Idx) {
5465 return CBI && CBI->isOperandBundleOfType(
5467 };
5468
5469 // Check to make sure that the "address of" an intrinsic function is never
5470 // taken. Ignore cases where the address of the intrinsic function is used
5471 // as the argument of operand bundle "clang.arc.attachedcall" as those
5472 // cases are handled in verifyAttachedCallBundle.
5473 Check((!F->isIntrinsic() ||
5474 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5475 IsAttachedCallOperand(F, CBI, i)),
5476 "Cannot take the address of an intrinsic!", &I);
5477 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5478 F->getIntrinsicID() == Intrinsic::donothing ||
5479 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5480 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5481 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5482 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5483 F->getIntrinsicID() == Intrinsic::coro_resume ||
5484 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5485 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5486 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5487 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5488 F->getIntrinsicID() ==
5489 Intrinsic::experimental_patchpoint_void ||
5490 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5491 F->getIntrinsicID() == Intrinsic::fake_use ||
5492 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5493 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5494 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5495 IsAttachedCallOperand(F, CBI, i),
5496 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5497 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5498 "wasm.(re)throw",
5499 &I);
5500 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5501 &M, F, F->getParent());
5502 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5503 Check(OpBB->getParent() == BB->getParent(),
5504 "Referring to a basic block in another function!", &I);
5505 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5506 Check(OpArg->getParent() == BB->getParent(),
5507 "Referring to an argument in another function!", &I);
5508 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5509 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5510 &M, GV, GV->getParent());
5511 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5512 Check(OpInst->getFunction() == BB->getParent(),
5513 "Referring to an instruction in another function!", &I);
5514 verifyDominatesUse(I, i);
5515 } else if (isa<InlineAsm>(I.getOperand(i))) {
5516 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5517 "Cannot take the address of an inline asm!", &I);
5518 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5519 visitConstantExprsRecursively(CPA);
5520 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5521 if (CE->getType()->isPtrOrPtrVectorTy()) {
5522 // If we have a ConstantExpr pointer, we need to see if it came from an
5523 // illegal bitcast.
5524 visitConstantExprsRecursively(CE);
5525 }
5526 }
5527 }
5528
5529 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5530 Check(I.getType()->isFPOrFPVectorTy(),
5531 "fpmath requires a floating point result!", &I);
5532 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5533 if (ConstantFP *CFP0 =
5535 const APFloat &Accuracy = CFP0->getValueAPF();
5536 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5537 "fpmath accuracy must have float type", &I);
5538 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5539 "fpmath accuracy not a positive number!", &I);
5540 } else {
5541 Check(false, "invalid fpmath accuracy!", &I);
5542 }
5543 }
5544
5545 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5547 "Ranges are only for loads, calls and invokes!", &I);
5548 visitRangeMetadata(I, Range, I.getType());
5549 }
5550
5551 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5554 "noalias.addrspace are only for memory operations!", &I);
5555 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5556 }
5557
5558 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5560 "invariant.group metadata is only for loads and stores", &I);
5561 }
5562
5563 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5564 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5565 &I);
5567 "nonnull applies only to load instructions, use attributes"
5568 " for calls or invokes",
5569 &I);
5570 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5571 }
5572
5573 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5574 visitDereferenceableMetadata(I, MD);
5575
5576 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5577 visitDereferenceableMetadata(I, MD);
5578
5579 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5580 visitNofreeMetadata(I, MD);
5581
5582 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5583 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5584
5585 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5586 visitAliasScopeListMetadata(MD);
5587 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5588 visitAliasScopeListMetadata(MD);
5589
5590 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5591 visitAccessGroupMetadata(MD);
5592
5593 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5594 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5595 &I);
5597 "align applies only to load instructions, "
5598 "use attributes for calls or invokes",
5599 &I);
5600 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5601 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5602 Check(CI && CI->getType()->isIntegerTy(64),
5603 "align metadata value must be an i64!", &I);
5604 uint64_t Align = CI->getZExtValue();
5605 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5606 &I);
5607 Check(Align <= Value::MaximumAlignment,
5608 "alignment is larger that implementation defined limit", &I);
5609 }
5610
5611 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5612 visitProfMetadata(I, MD);
5613
5614 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5615 visitMemProfMetadata(I, MD);
5616
5617 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5618 visitCallsiteMetadata(I, MD);
5619
5620 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5621 visitCalleeTypeMetadata(I, MD);
5622
5623 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5624 visitDIAssignIDMetadata(I, MD);
5625
5626 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5627 visitMMRAMetadata(I, MMRA);
5628
5629 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5630 visitAnnotationMetadata(Annotation);
5631
5632 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5633 visitCapturesMetadata(I, Captures);
5634
5635 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5636 visitAllocTokenMetadata(I, MD);
5637
5638 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5639 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5640 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5641
5642 if (auto *DL = dyn_cast<DILocation>(N)) {
5643 if (DL->getAtomGroup()) {
5644 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5645 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5646 "Instructions enabled",
5647 DL, DL->getScope()->getSubprogram());
5648 }
5649 }
5650 }
5651
5653 I.getAllMetadata(MDs);
5654 for (auto Attachment : MDs) {
5655 unsigned Kind = Attachment.first;
5656 auto AllowLocs =
5657 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5658 ? AreDebugLocsAllowed::Yes
5659 : AreDebugLocsAllowed::No;
5660 visitMDNode(*Attachment.second, AllowLocs);
5661 }
5662
5663 InstsInThisBlock.insert(&I);
5664}
5665
5666/// Allow intrinsics to be verified in different ways.
5667void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5669 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5670 IF);
5671
5672 // Verify that the intrinsic prototype lines up with what the .td files
5673 // describe.
5674 FunctionType *IFTy = IF->getFunctionType();
5675 bool IsVarArg = IFTy->isVarArg();
5676
5680
5681 // Walk the descriptors to extract overloaded types.
5686 "Intrinsic has incorrect return type!", IF);
5688 "Intrinsic has incorrect argument type!", IF);
5689
5690 // Verify if the intrinsic call matches the vararg property.
5691 if (IsVarArg)
5693 "Intrinsic was not defined with variable arguments!", IF);
5694 else
5696 "Callsite was not defined with variable arguments!", IF);
5697
5698 // All descriptors should be absorbed by now.
5699 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5700
5701 // Now that we have the intrinsic ID and the actual argument types (and we
5702 // know they are legal for the intrinsic!) get the intrinsic name through the
5703 // usual means. This allows us to verify the mangling of argument types into
5704 // the name.
5705 const std::string ExpectedName =
5706 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5707 Check(ExpectedName == IF->getName(),
5708 "Intrinsic name not mangled correctly for type arguments! "
5709 "Should be: " +
5710 ExpectedName,
5711 IF);
5712
5713 // If the intrinsic takes MDNode arguments, verify that they are either global
5714 // or are local to *this* function.
5715 for (Value *V : Call.args()) {
5716 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5717 visitMetadataAsValue(*MD, Call.getCaller());
5718 if (auto *Const = dyn_cast<Constant>(V))
5719 Check(!Const->getType()->isX86_AMXTy(),
5720 "const x86_amx is not allowed in argument!");
5721 }
5722
5723 switch (ID) {
5724 default:
5725 break;
5726 case Intrinsic::assume: {
5727 if (Call.hasOperandBundles()) {
5729 Check(Cond && Cond->isOne(),
5730 "assume with operand bundles must have i1 true condition", Call);
5731 }
5732 for (auto &Elem : Call.bundle_op_infos()) {
5733 unsigned ArgCount = Elem.End - Elem.Begin;
5734 // Separate storage assumptions are special insofar as they're the only
5735 // operand bundles allowed on assumes that aren't parameter attributes.
5736 if (Elem.Tag->getKey() == "separate_storage") {
5737 Check(ArgCount == 2,
5738 "separate_storage assumptions should have 2 arguments", Call);
5739 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5740 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5741 "arguments to separate_storage assumptions should be pointers",
5742 Call);
5743 continue;
5744 }
5745 Check(Elem.Tag->getKey() == "ignore" ||
5746 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5747 "tags must be valid attribute names", Call);
5748 Attribute::AttrKind Kind =
5749 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5750 if (Kind == Attribute::Alignment) {
5751 Check(ArgCount <= 3 && ArgCount >= 2,
5752 "alignment assumptions should have 2 or 3 arguments", Call);
5753 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5754 "first argument should be a pointer", Call);
5755 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5756 "second argument should be an integer", Call);
5757 if (ArgCount == 3)
5758 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5759 "third argument should be an integer if present", Call);
5760 continue;
5761 }
5762 if (Kind == Attribute::Dereferenceable) {
5763 Check(ArgCount == 2,
5764 "dereferenceable assumptions should have 2 arguments", Call);
5765 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5766 "first argument should be a pointer", Call);
5767 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5768 "second argument should be an integer", Call);
5769 continue;
5770 }
5771 Check(ArgCount <= 2, "too many arguments", Call);
5772 if (Kind == Attribute::None)
5773 break;
5774 if (Attribute::isIntAttrKind(Kind)) {
5775 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5776 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5777 "the second argument should be a constant integral value", Call);
5778 } else if (Attribute::canUseAsParamAttr(Kind)) {
5779 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5780 } else if (Attribute::canUseAsFnAttr(Kind)) {
5781 Check((ArgCount) == 0, "this attribute has no argument", Call);
5782 }
5783 }
5784 break;
5785 }
5786 case Intrinsic::ucmp:
5787 case Intrinsic::scmp: {
5788 Type *SrcTy = Call.getOperand(0)->getType();
5789 Type *DestTy = Call.getType();
5790
5791 Check(DestTy->getScalarSizeInBits() >= 2,
5792 "result type must be at least 2 bits wide", Call);
5793
5794 bool IsDestTypeVector = DestTy->isVectorTy();
5795 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5796 "ucmp/scmp argument and result types must both be either vector or "
5797 "scalar types",
5798 Call);
5799 if (IsDestTypeVector) {
5800 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5801 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5802 Check(SrcVecLen == DestVecLen,
5803 "return type and arguments must have the same number of "
5804 "elements",
5805 Call);
5806 }
5807 break;
5808 }
5809 case Intrinsic::coro_id: {
5810 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5811 if (isa<ConstantPointerNull>(InfoArg))
5812 break;
5813 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5814 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5815 "info argument of llvm.coro.id must refer to an initialized "
5816 "constant");
5817 Constant *Init = GV->getInitializer();
5819 "info argument of llvm.coro.id must refer to either a struct or "
5820 "an array");
5821 break;
5822 }
5823 case Intrinsic::is_fpclass: {
5824 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5825 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5826 "unsupported bits for llvm.is.fpclass test mask");
5827 break;
5828 }
5829 case Intrinsic::fptrunc_round: {
5830 // Check the rounding mode
5831 Metadata *MD = nullptr;
5833 if (MAV)
5834 MD = MAV->getMetadata();
5835
5836 Check(MD != nullptr, "missing rounding mode argument", Call);
5837
5838 Check(isa<MDString>(MD),
5839 ("invalid value for llvm.fptrunc.round metadata operand"
5840 " (the operand should be a string)"),
5841 MD);
5842
5843 std::optional<RoundingMode> RoundMode =
5844 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5845 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5846 "unsupported rounding mode argument", Call);
5847 break;
5848 }
5849#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5850#include "llvm/IR/VPIntrinsics.def"
5851#undef BEGIN_REGISTER_VP_INTRINSIC
5852 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5853 break;
5854#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5855 case Intrinsic::INTRINSIC:
5856#include "llvm/IR/ConstrainedOps.def"
5857#undef INSTRUCTION
5858 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5859 break;
5860 case Intrinsic::dbg_declare: // llvm.dbg.declare
5861 case Intrinsic::dbg_value: // llvm.dbg.value
5862 case Intrinsic::dbg_assign: // llvm.dbg.assign
5863 case Intrinsic::dbg_label: // llvm.dbg.label
5864 // We no longer interpret debug intrinsics (the old variable-location
5865 // design). They're meaningless as far as LLVM is concerned we could make
5866 // it an error for them to appear, but it's possible we'll have users
5867 // converting back to intrinsics for the forseeable future (such as DXIL),
5868 // so tolerate their existance.
5869 break;
5870 case Intrinsic::memcpy:
5871 case Intrinsic::memcpy_inline:
5872 case Intrinsic::memmove:
5873 case Intrinsic::memset:
5874 case Intrinsic::memset_inline:
5875 break;
5876 case Intrinsic::experimental_memset_pattern: {
5877 const auto Memset = cast<MemSetPatternInst>(&Call);
5878 Check(Memset->getValue()->getType()->isSized(),
5879 "unsized types cannot be used as memset patterns", Call);
5880 break;
5881 }
5882 case Intrinsic::memcpy_element_unordered_atomic:
5883 case Intrinsic::memmove_element_unordered_atomic:
5884 case Intrinsic::memset_element_unordered_atomic: {
5885 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5886
5887 ConstantInt *ElementSizeCI =
5888 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5889 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5890 Check(ElementSizeVal.isPowerOf2(),
5891 "element size of the element-wise atomic memory intrinsic "
5892 "must be a power of 2",
5893 Call);
5894
5895 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5896 return Alignment && ElementSizeVal.ule(Alignment->value());
5897 };
5898 Check(IsValidAlignment(AMI->getDestAlign()),
5899 "incorrect alignment of the destination argument", Call);
5900 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5901 Check(IsValidAlignment(AMT->getSourceAlign()),
5902 "incorrect alignment of the source argument", Call);
5903 }
5904 break;
5905 }
5906 case Intrinsic::call_preallocated_setup: {
5907 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
5908 bool FoundCall = false;
5909 for (User *U : Call.users()) {
5910 auto *UseCall = dyn_cast<CallBase>(U);
5911 Check(UseCall != nullptr,
5912 "Uses of llvm.call.preallocated.setup must be calls");
5913 Intrinsic::ID IID = UseCall->getIntrinsicID();
5914 if (IID == Intrinsic::call_preallocated_arg) {
5915 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5916 Check(AllocArgIndex != nullptr,
5917 "llvm.call.preallocated.alloc arg index must be a constant");
5918 auto AllocArgIndexInt = AllocArgIndex->getValue();
5919 Check(AllocArgIndexInt.sge(0) &&
5920 AllocArgIndexInt.slt(NumArgs->getValue()),
5921 "llvm.call.preallocated.alloc arg index must be between 0 and "
5922 "corresponding "
5923 "llvm.call.preallocated.setup's argument count");
5924 } else if (IID == Intrinsic::call_preallocated_teardown) {
5925 // nothing to do
5926 } else {
5927 Check(!FoundCall, "Can have at most one call corresponding to a "
5928 "llvm.call.preallocated.setup");
5929 FoundCall = true;
5930 size_t NumPreallocatedArgs = 0;
5931 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5932 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5933 ++NumPreallocatedArgs;
5934 }
5935 }
5936 Check(NumPreallocatedArgs != 0,
5937 "cannot use preallocated intrinsics on a call without "
5938 "preallocated arguments");
5939 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5940 "llvm.call.preallocated.setup arg size must be equal to number "
5941 "of preallocated arguments "
5942 "at call site",
5943 Call, *UseCall);
5944 // getOperandBundle() cannot be called if more than one of the operand
5945 // bundle exists. There is already a check elsewhere for this, so skip
5946 // here if we see more than one.
5947 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5948 1) {
5949 return;
5950 }
5951 auto PreallocatedBundle =
5952 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5953 Check(PreallocatedBundle,
5954 "Use of llvm.call.preallocated.setup outside intrinsics "
5955 "must be in \"preallocated\" operand bundle");
5956 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5957 "preallocated bundle must have token from corresponding "
5958 "llvm.call.preallocated.setup");
5959 }
5960 }
5961 break;
5962 }
5963 case Intrinsic::call_preallocated_arg: {
5964 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5965 Check(Token &&
5966 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5967 "llvm.call.preallocated.arg token argument must be a "
5968 "llvm.call.preallocated.setup");
5969 Check(Call.hasFnAttr(Attribute::Preallocated),
5970 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5971 "call site attribute");
5972 break;
5973 }
5974 case Intrinsic::call_preallocated_teardown: {
5975 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5976 Check(Token &&
5977 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5978 "llvm.call.preallocated.teardown token argument must be a "
5979 "llvm.call.preallocated.setup");
5980 break;
5981 }
5982 case Intrinsic::gcroot:
5983 case Intrinsic::gcwrite:
5984 case Intrinsic::gcread:
5985 if (ID == Intrinsic::gcroot) {
5986 AllocaInst *AI =
5988 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5990 "llvm.gcroot parameter #2 must be a constant.", Call);
5991 if (!AI->getAllocatedType()->isPointerTy()) {
5993 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5994 "or argument #2 must be a non-null constant.",
5995 Call);
5996 }
5997 }
5998
5999 Check(Call.getParent()->getParent()->hasGC(),
6000 "Enclosing function does not use GC.", Call);
6001 break;
6002 case Intrinsic::init_trampoline:
6004 "llvm.init_trampoline parameter #2 must resolve to a function.",
6005 Call);
6006 break;
6007 case Intrinsic::prefetch:
6008 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6009 "rw argument to llvm.prefetch must be 0-1", Call);
6010 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6011 "locality argument to llvm.prefetch must be 0-3", Call);
6012 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6013 "cache type argument to llvm.prefetch must be 0-1", Call);
6014 break;
6015 case Intrinsic::stackprotector:
6017 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6018 break;
6019 case Intrinsic::localescape: {
6020 BasicBlock *BB = Call.getParent();
6021 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6022 Call);
6023 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6024 Call);
6025 for (Value *Arg : Call.args()) {
6026 if (isa<ConstantPointerNull>(Arg))
6027 continue; // Null values are allowed as placeholders.
6028 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6029 Check(AI && AI->isStaticAlloca(),
6030 "llvm.localescape only accepts static allocas", Call);
6031 }
6032 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6033 SawFrameEscape = true;
6034 break;
6035 }
6036 case Intrinsic::localrecover: {
6038 Function *Fn = dyn_cast<Function>(FnArg);
6039 Check(Fn && !Fn->isDeclaration(),
6040 "llvm.localrecover first "
6041 "argument must be function defined in this module",
6042 Call);
6043 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6044 auto &Entry = FrameEscapeInfo[Fn];
6045 Entry.second = unsigned(
6046 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6047 break;
6048 }
6049
6050 case Intrinsic::experimental_gc_statepoint:
6051 if (auto *CI = dyn_cast<CallInst>(&Call))
6052 Check(!CI->isInlineAsm(),
6053 "gc.statepoint support for inline assembly unimplemented", CI);
6054 Check(Call.getParent()->getParent()->hasGC(),
6055 "Enclosing function does not use GC.", Call);
6056
6057 verifyStatepoint(Call);
6058 break;
6059 case Intrinsic::experimental_gc_result: {
6060 Check(Call.getParent()->getParent()->hasGC(),
6061 "Enclosing function does not use GC.", Call);
6062
6063 auto *Statepoint = Call.getArgOperand(0);
6064 if (isa<UndefValue>(Statepoint))
6065 break;
6066
6067 // Are we tied to a statepoint properly?
6068 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6069 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6070 Intrinsic::experimental_gc_statepoint,
6071 "gc.result operand #1 must be from a statepoint", Call,
6072 Call.getArgOperand(0));
6073
6074 // Check that result type matches wrapped callee.
6075 auto *TargetFuncType =
6076 cast<FunctionType>(StatepointCall->getParamElementType(2));
6077 Check(Call.getType() == TargetFuncType->getReturnType(),
6078 "gc.result result type does not match wrapped callee", Call);
6079 break;
6080 }
6081 case Intrinsic::experimental_gc_relocate: {
6082 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6083
6085 "gc.relocate must return a pointer or a vector of pointers", Call);
6086
6087 // Check that this relocate is correctly tied to the statepoint
6088
6089 // This is case for relocate on the unwinding path of an invoke statepoint
6090 if (LandingPadInst *LandingPad =
6092
6093 const BasicBlock *InvokeBB =
6094 LandingPad->getParent()->getUniquePredecessor();
6095
6096 // Landingpad relocates should have only one predecessor with invoke
6097 // statepoint terminator
6098 Check(InvokeBB, "safepoints should have unique landingpads",
6099 LandingPad->getParent());
6100 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6101 InvokeBB);
6103 "gc relocate should be linked to a statepoint", InvokeBB);
6104 } else {
6105 // In all other cases relocate should be tied to the statepoint directly.
6106 // This covers relocates on a normal return path of invoke statepoint and
6107 // relocates of a call statepoint.
6108 auto *Token = Call.getArgOperand(0);
6110 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6111 }
6112
6113 // Verify rest of the relocate arguments.
6114 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6115
6116 // Both the base and derived must be piped through the safepoint.
6119 "gc.relocate operand #2 must be integer offset", Call);
6120
6121 Value *Derived = Call.getArgOperand(2);
6122 Check(isa<ConstantInt>(Derived),
6123 "gc.relocate operand #3 must be integer offset", Call);
6124
6125 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6126 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6127
6128 // Check the bounds
6129 if (isa<UndefValue>(StatepointCall))
6130 break;
6131 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6132 .getOperandBundle(LLVMContext::OB_gc_live)) {
6133 Check(BaseIndex < Opt->Inputs.size(),
6134 "gc.relocate: statepoint base index out of bounds", Call);
6135 Check(DerivedIndex < Opt->Inputs.size(),
6136 "gc.relocate: statepoint derived index out of bounds", Call);
6137 }
6138
6139 // Relocated value must be either a pointer type or vector-of-pointer type,
6140 // but gc_relocate does not need to return the same pointer type as the
6141 // relocated pointer. It can be casted to the correct type later if it's
6142 // desired. However, they must have the same address space and 'vectorness'
6143 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6144 auto *ResultType = Call.getType();
6145 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6146 auto *BaseType = Relocate.getBasePtr()->getType();
6147
6148 Check(BaseType->isPtrOrPtrVectorTy(),
6149 "gc.relocate: relocated value must be a pointer", Call);
6150 Check(DerivedType->isPtrOrPtrVectorTy(),
6151 "gc.relocate: relocated value must be a pointer", Call);
6152
6153 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6154 "gc.relocate: vector relocates to vector and pointer to pointer",
6155 Call);
6156 Check(
6157 ResultType->getPointerAddressSpace() ==
6158 DerivedType->getPointerAddressSpace(),
6159 "gc.relocate: relocating a pointer shouldn't change its address space",
6160 Call);
6161
6162 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6163 Check(GC, "gc.relocate: calling function must have GCStrategy",
6164 Call.getFunction());
6165 if (GC) {
6166 auto isGCPtr = [&GC](Type *PTy) {
6167 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6168 };
6169 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6170 Check(isGCPtr(BaseType),
6171 "gc.relocate: relocated value must be a gc pointer", Call);
6172 Check(isGCPtr(DerivedType),
6173 "gc.relocate: relocated value must be a gc pointer", Call);
6174 }
6175 break;
6176 }
6177 case Intrinsic::experimental_patchpoint: {
6178 if (Call.getCallingConv() == CallingConv::AnyReg) {
6180 "patchpoint: invalid return type used with anyregcc", Call);
6181 }
6182 break;
6183 }
6184 case Intrinsic::eh_exceptioncode:
6185 case Intrinsic::eh_exceptionpointer: {
6187 "eh.exceptionpointer argument must be a catchpad", Call);
6188 break;
6189 }
6190 case Intrinsic::get_active_lane_mask: {
6192 "get_active_lane_mask: must return a "
6193 "vector",
6194 Call);
6195 auto *ElemTy = Call.getType()->getScalarType();
6196 Check(ElemTy->isIntegerTy(1),
6197 "get_active_lane_mask: element type is not "
6198 "i1",
6199 Call);
6200 break;
6201 }
6202 case Intrinsic::experimental_get_vector_length: {
6203 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6204 Check(!VF->isNegative() && !VF->isZero(),
6205 "get_vector_length: VF must be positive", Call);
6206 break;
6207 }
6208 case Intrinsic::masked_load: {
6209 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6210 Call);
6211
6212 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6214 Value *PassThru = Call.getArgOperand(3);
6215 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6216 Call);
6217 Check(Alignment->getValue().isPowerOf2(),
6218 "masked_load: alignment must be a power of 2", Call);
6219 Check(PassThru->getType() == Call.getType(),
6220 "masked_load: pass through and return type must match", Call);
6221 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6222 cast<VectorType>(Call.getType())->getElementCount(),
6223 "masked_load: vector mask must be same length as return", Call);
6224 break;
6225 }
6226 case Intrinsic::masked_store: {
6227 Value *Val = Call.getArgOperand(0);
6228 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6230 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6231 Call);
6232 Check(Alignment->getValue().isPowerOf2(),
6233 "masked_store: alignment must be a power of 2", Call);
6234 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6235 cast<VectorType>(Val->getType())->getElementCount(),
6236 "masked_store: vector mask must be same length as value", Call);
6237 break;
6238 }
6239
6240 case Intrinsic::masked_gather: {
6241 const APInt &Alignment =
6243 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6244 "masked_gather: alignment must be 0 or a power of 2", Call);
6245 break;
6246 }
6247 case Intrinsic::masked_scatter: {
6248 const APInt &Alignment =
6249 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6250 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6251 "masked_scatter: alignment must be 0 or a power of 2", Call);
6252 break;
6253 }
6254
6255 case Intrinsic::experimental_guard: {
6256 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6258 "experimental_guard must have exactly one "
6259 "\"deopt\" operand bundle");
6260 break;
6261 }
6262
6263 case Intrinsic::experimental_deoptimize: {
6264 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6265 Call);
6267 "experimental_deoptimize must have exactly one "
6268 "\"deopt\" operand bundle");
6270 "experimental_deoptimize return type must match caller return type");
6271
6272 if (isa<CallInst>(Call)) {
6274 Check(RI,
6275 "calls to experimental_deoptimize must be followed by a return");
6276
6277 if (!Call.getType()->isVoidTy() && RI)
6278 Check(RI->getReturnValue() == &Call,
6279 "calls to experimental_deoptimize must be followed by a return "
6280 "of the value computed by experimental_deoptimize");
6281 }
6282
6283 break;
6284 }
6285 case Intrinsic::vastart: {
6287 "va_start called in a non-varargs function");
6288 break;
6289 }
6290 case Intrinsic::get_dynamic_area_offset: {
6291 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6292 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6293 IntTy->getBitWidth(),
6294 "get_dynamic_area_offset result type must be scalar integer matching "
6295 "alloca address space width",
6296 Call);
6297 break;
6298 }
6299 case Intrinsic::vector_reduce_and:
6300 case Intrinsic::vector_reduce_or:
6301 case Intrinsic::vector_reduce_xor:
6302 case Intrinsic::vector_reduce_add:
6303 case Intrinsic::vector_reduce_mul:
6304 case Intrinsic::vector_reduce_smax:
6305 case Intrinsic::vector_reduce_smin:
6306 case Intrinsic::vector_reduce_umax:
6307 case Intrinsic::vector_reduce_umin: {
6308 Type *ArgTy = Call.getArgOperand(0)->getType();
6309 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6310 "Intrinsic has incorrect argument type!");
6311 break;
6312 }
6313 case Intrinsic::vector_reduce_fmax:
6314 case Intrinsic::vector_reduce_fmin: {
6315 Type *ArgTy = Call.getArgOperand(0)->getType();
6316 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6317 "Intrinsic has incorrect argument type!");
6318 break;
6319 }
6320 case Intrinsic::vector_reduce_fadd:
6321 case Intrinsic::vector_reduce_fmul: {
6322 // Unlike the other reductions, the first argument is a start value. The
6323 // second argument is the vector to be reduced.
6324 Type *ArgTy = Call.getArgOperand(1)->getType();
6325 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6326 "Intrinsic has incorrect argument type!");
6327 break;
6328 }
6329 case Intrinsic::smul_fix:
6330 case Intrinsic::smul_fix_sat:
6331 case Intrinsic::umul_fix:
6332 case Intrinsic::umul_fix_sat:
6333 case Intrinsic::sdiv_fix:
6334 case Intrinsic::sdiv_fix_sat:
6335 case Intrinsic::udiv_fix:
6336 case Intrinsic::udiv_fix_sat: {
6337 Value *Op1 = Call.getArgOperand(0);
6338 Value *Op2 = Call.getArgOperand(1);
6340 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6341 "vector of ints");
6343 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6344 "vector of ints");
6345
6346 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6347 Check(Op3->getType()->isIntegerTy(),
6348 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6349 Check(Op3->getBitWidth() <= 32,
6350 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6351
6352 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6353 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6354 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6355 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6356 "the operands");
6357 } else {
6358 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6359 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6360 "to the width of the operands");
6361 }
6362 break;
6363 }
6364 case Intrinsic::lrint:
6365 case Intrinsic::llrint:
6366 case Intrinsic::lround:
6367 case Intrinsic::llround: {
6368 Type *ValTy = Call.getArgOperand(0)->getType();
6369 Type *ResultTy = Call.getType();
6370 auto *VTy = dyn_cast<VectorType>(ValTy);
6371 auto *RTy = dyn_cast<VectorType>(ResultTy);
6372 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6373 ExpectedName + ": argument must be floating-point or vector "
6374 "of floating-points, and result must be integer or "
6375 "vector of integers",
6376 &Call);
6377 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6378 ExpectedName + ": argument and result disagree on vector use", &Call);
6379 if (VTy) {
6380 Check(VTy->getElementCount() == RTy->getElementCount(),
6381 ExpectedName + ": argument must be same length as result", &Call);
6382 }
6383 break;
6384 }
6385 case Intrinsic::bswap: {
6386 Type *Ty = Call.getType();
6387 unsigned Size = Ty->getScalarSizeInBits();
6388 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6389 break;
6390 }
6391 case Intrinsic::invariant_start: {
6392 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6393 Check(InvariantSize &&
6394 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6395 "invariant_start parameter must be -1, 0 or a positive number",
6396 &Call);
6397 break;
6398 }
6399 case Intrinsic::matrix_multiply:
6400 case Intrinsic::matrix_transpose:
6401 case Intrinsic::matrix_column_major_load:
6402 case Intrinsic::matrix_column_major_store: {
6404 ConstantInt *Stride = nullptr;
6405 ConstantInt *NumRows;
6406 ConstantInt *NumColumns;
6407 VectorType *ResultTy;
6408 Type *Op0ElemTy = nullptr;
6409 Type *Op1ElemTy = nullptr;
6410 switch (ID) {
6411 case Intrinsic::matrix_multiply: {
6412 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6413 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6414 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6416 ->getNumElements() ==
6417 NumRows->getZExtValue() * N->getZExtValue(),
6418 "First argument of a matrix operation does not match specified "
6419 "shape!");
6421 ->getNumElements() ==
6422 N->getZExtValue() * NumColumns->getZExtValue(),
6423 "Second argument of a matrix operation does not match specified "
6424 "shape!");
6425
6426 ResultTy = cast<VectorType>(Call.getType());
6427 Op0ElemTy =
6428 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6429 Op1ElemTy =
6430 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6431 break;
6432 }
6433 case Intrinsic::matrix_transpose:
6434 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6435 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6436 ResultTy = cast<VectorType>(Call.getType());
6437 Op0ElemTy =
6438 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6439 break;
6440 case Intrinsic::matrix_column_major_load: {
6442 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6443 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6444 ResultTy = cast<VectorType>(Call.getType());
6445 break;
6446 }
6447 case Intrinsic::matrix_column_major_store: {
6449 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6450 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6451 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6452 Op0ElemTy =
6453 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6454 break;
6455 }
6456 default:
6457 llvm_unreachable("unexpected intrinsic");
6458 }
6459
6460 Check(ResultTy->getElementType()->isIntegerTy() ||
6461 ResultTy->getElementType()->isFloatingPointTy(),
6462 "Result type must be an integer or floating-point type!", IF);
6463
6464 if (Op0ElemTy)
6465 Check(ResultTy->getElementType() == Op0ElemTy,
6466 "Vector element type mismatch of the result and first operand "
6467 "vector!",
6468 IF);
6469
6470 if (Op1ElemTy)
6471 Check(ResultTy->getElementType() == Op1ElemTy,
6472 "Vector element type mismatch of the result and second operand "
6473 "vector!",
6474 IF);
6475
6477 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6478 "Result of a matrix operation does not fit in the returned vector!");
6479
6480 if (Stride)
6481 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6482 "Stride must be greater or equal than the number of rows!", IF);
6483
6484 break;
6485 }
6486 case Intrinsic::vector_splice: {
6488 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6489 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6490 if (Call.getParent() && Call.getParent()->getParent()) {
6491 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6492 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6493 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6494 }
6495 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6496 (Idx >= 0 && Idx < KnownMinNumElements),
6497 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6498 "known minimum number of elements in the vector. For scalable "
6499 "vectors the minimum number of elements is determined from "
6500 "vscale_range.",
6501 &Call);
6502 break;
6503 }
6504 case Intrinsic::stepvector: {
6506 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6507 VecTy->getScalarSizeInBits() >= 8,
6508 "stepvector only supported for vectors of integers "
6509 "with a bitwidth of at least 8.",
6510 &Call);
6511 break;
6512 }
6513 case Intrinsic::experimental_vector_match: {
6514 Value *Op1 = Call.getArgOperand(0);
6515 Value *Op2 = Call.getArgOperand(1);
6517
6518 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6519 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6520 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6521
6522 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6524 "Second operand must be a fixed length vector.", &Call);
6525 Check(Op1Ty->getElementType()->isIntegerTy(),
6526 "First operand must be a vector of integers.", &Call);
6527 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6528 "First two operands must have the same element type.", &Call);
6529 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6530 "First operand and mask must have the same number of elements.",
6531 &Call);
6532 Check(MaskTy->getElementType()->isIntegerTy(1),
6533 "Mask must be a vector of i1's.", &Call);
6534 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6535 &Call);
6536 break;
6537 }
6538 case Intrinsic::vector_insert: {
6539 Value *Vec = Call.getArgOperand(0);
6540 Value *SubVec = Call.getArgOperand(1);
6541 Value *Idx = Call.getArgOperand(2);
6542 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6543
6544 VectorType *VecTy = cast<VectorType>(Vec->getType());
6545 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6546
6547 ElementCount VecEC = VecTy->getElementCount();
6548 ElementCount SubVecEC = SubVecTy->getElementCount();
6549 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6550 "vector_insert parameters must have the same element "
6551 "type.",
6552 &Call);
6553 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6554 "vector_insert index must be a constant multiple of "
6555 "the subvector's known minimum vector length.");
6556
6557 // If this insertion is not the 'mixed' case where a fixed vector is
6558 // inserted into a scalable vector, ensure that the insertion of the
6559 // subvector does not overrun the parent vector.
6560 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6561 Check(IdxN < VecEC.getKnownMinValue() &&
6562 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6563 "subvector operand of vector_insert would overrun the "
6564 "vector being inserted into.");
6565 }
6566 break;
6567 }
6568 case Intrinsic::vector_extract: {
6569 Value *Vec = Call.getArgOperand(0);
6570 Value *Idx = Call.getArgOperand(1);
6571 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6572
6573 VectorType *ResultTy = cast<VectorType>(Call.getType());
6574 VectorType *VecTy = cast<VectorType>(Vec->getType());
6575
6576 ElementCount VecEC = VecTy->getElementCount();
6577 ElementCount ResultEC = ResultTy->getElementCount();
6578
6579 Check(ResultTy->getElementType() == VecTy->getElementType(),
6580 "vector_extract result must have the same element "
6581 "type as the input vector.",
6582 &Call);
6583 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6584 "vector_extract index must be a constant multiple of "
6585 "the result type's known minimum vector length.");
6586
6587 // If this extraction is not the 'mixed' case where a fixed vector is
6588 // extracted from a scalable vector, ensure that the extraction does not
6589 // overrun the parent vector.
6590 if (VecEC.isScalable() == ResultEC.isScalable()) {
6591 Check(IdxN < VecEC.getKnownMinValue() &&
6592 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6593 "vector_extract would overrun.");
6594 }
6595 break;
6596 }
6597 case Intrinsic::vector_partial_reduce_add: {
6600
6601 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6602 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6603
6604 Check((VecWidth % AccWidth) == 0,
6605 "Invalid vector widths for partial "
6606 "reduction. The width of the input vector "
6607 "must be a positive integer multiple of "
6608 "the width of the accumulator vector.");
6609 break;
6610 }
6611 case Intrinsic::experimental_noalias_scope_decl: {
6612 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6613 break;
6614 }
6615 case Intrinsic::preserve_array_access_index:
6616 case Intrinsic::preserve_struct_access_index:
6617 case Intrinsic::aarch64_ldaxr:
6618 case Intrinsic::aarch64_ldxr:
6619 case Intrinsic::arm_ldaex:
6620 case Intrinsic::arm_ldrex: {
6621 Type *ElemTy = Call.getParamElementType(0);
6622 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6623 &Call);
6624 break;
6625 }
6626 case Intrinsic::aarch64_stlxr:
6627 case Intrinsic::aarch64_stxr:
6628 case Intrinsic::arm_stlex:
6629 case Intrinsic::arm_strex: {
6630 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6631 Check(ElemTy,
6632 "Intrinsic requires elementtype attribute on second argument.",
6633 &Call);
6634 break;
6635 }
6636 case Intrinsic::aarch64_prefetch: {
6637 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6638 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6639 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6640 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6641 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6642 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6643 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6644 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6645 break;
6646 }
6647 case Intrinsic::callbr_landingpad: {
6648 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6649 Check(CBR, "intrinstic requires callbr operand", &Call);
6650 if (!CBR)
6651 break;
6652
6653 const BasicBlock *LandingPadBB = Call.getParent();
6654 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6655 if (!PredBB) {
6656 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6657 break;
6658 }
6659 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6660 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6661 &Call);
6662 break;
6663 }
6664 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6665 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6666 "block in indirect destination list",
6667 &Call);
6668 const Instruction &First = *LandingPadBB->begin();
6669 Check(&First == &Call, "No other instructions may proceed intrinsic",
6670 &Call);
6671 break;
6672 }
6673 case Intrinsic::amdgcn_cs_chain: {
6674 auto CallerCC = Call.getCaller()->getCallingConv();
6675 switch (CallerCC) {
6676 case CallingConv::AMDGPU_CS:
6677 case CallingConv::AMDGPU_CS_Chain:
6678 case CallingConv::AMDGPU_CS_ChainPreserve:
6679 break;
6680 default:
6681 CheckFailed("Intrinsic can only be used from functions with the "
6682 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6683 "calling conventions",
6684 &Call);
6685 break;
6686 }
6687
6688 Check(Call.paramHasAttr(2, Attribute::InReg),
6689 "SGPR arguments must have the `inreg` attribute", &Call);
6690 Check(!Call.paramHasAttr(3, Attribute::InReg),
6691 "VGPR arguments must not have the `inreg` attribute", &Call);
6692
6693 auto *Next = Call.getNextNode();
6694 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6695 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6696 Intrinsic::amdgcn_unreachable;
6697 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6698 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6699 break;
6700 }
6701 case Intrinsic::amdgcn_init_exec_from_input: {
6702 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6703 Check(Arg && Arg->hasInRegAttr(),
6704 "only inreg arguments to the parent function are valid as inputs to "
6705 "this intrinsic",
6706 &Call);
6707 break;
6708 }
6709 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6710 auto CallerCC = Call.getCaller()->getCallingConv();
6711 switch (CallerCC) {
6712 case CallingConv::AMDGPU_CS_Chain:
6713 case CallingConv::AMDGPU_CS_ChainPreserve:
6714 break;
6715 default:
6716 CheckFailed("Intrinsic can only be used from functions with the "
6717 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6718 "calling conventions",
6719 &Call);
6720 break;
6721 }
6722
6723 unsigned InactiveIdx = 1;
6724 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6725 "Value for inactive lanes must not have the `inreg` attribute",
6726 &Call);
6727 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6728 "Value for inactive lanes must be a function argument", &Call);
6729 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6730 "Value for inactive lanes must be a VGPR function argument", &Call);
6731 break;
6732 }
6733 case Intrinsic::amdgcn_call_whole_wave: {
6735 Check(F, "Indirect whole wave calls are not allowed", &Call);
6736
6737 CallingConv::ID CC = F->getCallingConv();
6738 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6739 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6740 &Call);
6741
6742 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6743
6744 Check(Call.arg_size() == F->arg_size(),
6745 "Call argument count must match callee argument count", &Call);
6746
6747 // The first argument of the call is the callee, and the first argument of
6748 // the callee is the active mask. The rest of the arguments must match.
6749 Check(F->arg_begin()->getType()->isIntegerTy(1),
6750 "Callee must have i1 as its first argument", &Call);
6751 for (auto [CallArg, FuncArg] :
6752 drop_begin(zip_equal(Call.args(), F->args()))) {
6753 Check(CallArg->getType() == FuncArg.getType(),
6754 "Argument types must match", &Call);
6755
6756 // Check that inreg attributes match between call site and function
6757 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6758 FuncArg.hasInRegAttr(),
6759 "Argument inreg attributes must match", &Call);
6760 }
6761 break;
6762 }
6763 case Intrinsic::amdgcn_s_prefetch_data: {
6764 Check(
6767 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6768 break;
6769 }
6770 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6771 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6772 Value *Src0 = Call.getArgOperand(0);
6773 Value *Src1 = Call.getArgOperand(1);
6774
6775 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6776 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6777 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6778 Call.getArgOperand(3));
6779 Check(BLGP <= 4, "invalid value for blgp format", Call,
6780 Call.getArgOperand(4));
6781
6782 // AMDGPU::MFMAScaleFormats values
6783 auto getFormatNumRegs = [](unsigned FormatVal) {
6784 switch (FormatVal) {
6785 case 0:
6786 case 1:
6787 return 8u;
6788 case 2:
6789 case 3:
6790 return 6u;
6791 case 4:
6792 return 4u;
6793 default:
6794 llvm_unreachable("invalid format value");
6795 }
6796 };
6797
6798 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6799 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6800 return false;
6801 unsigned NumElts = Ty->getNumElements();
6802 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6803 };
6804
6805 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6806 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6807 Check(isValidSrcASrcBVector(Src0Ty),
6808 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6809 Check(isValidSrcASrcBVector(Src1Ty),
6810 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6811
6812 // Permit excess registers for the format.
6813 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6814 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6815 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6816 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6817 break;
6818 }
6819 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6820 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6821 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6822 Value *Src0 = Call.getArgOperand(1);
6823 Value *Src1 = Call.getArgOperand(3);
6824
6825 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6826 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6827 Check(FmtA <= 4, "invalid value for matrix format", Call,
6828 Call.getArgOperand(0));
6829 Check(FmtB <= 4, "invalid value for matrix format", Call,
6830 Call.getArgOperand(2));
6831
6832 // AMDGPU::MatrixFMT values
6833 auto getFormatNumRegs = [](unsigned FormatVal) {
6834 switch (FormatVal) {
6835 case 0:
6836 case 1:
6837 return 16u;
6838 case 2:
6839 case 3:
6840 return 12u;
6841 case 4:
6842 return 8u;
6843 default:
6844 llvm_unreachable("invalid format value");
6845 }
6846 };
6847
6848 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6849 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6850 return false;
6851 unsigned NumElts = Ty->getNumElements();
6852 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6853 };
6854
6855 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6856 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6857 Check(isValidSrcASrcBVector(Src0Ty),
6858 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6859 Check(isValidSrcASrcBVector(Src1Ty),
6860 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6861
6862 // Permit excess registers for the format.
6863 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6864 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6865 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6866 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6867 break;
6868 }
6869 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6870 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6871 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6872 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6873 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6874 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6875 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6876 Value *PtrArg = Call.getArgOperand(0);
6877 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6879 "cooperative atomic intrinsics require a generic or global pointer",
6880 &Call, PtrArg);
6881
6882 // Last argument must be a MD string
6884 MDNode *MD = cast<MDNode>(Op->getMetadata());
6885 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6886 "cooperative atomic intrinsics require that the last argument is a "
6887 "metadata string",
6888 &Call, Op);
6889 break;
6890 }
6891 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6892 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6893 Value *V = Call.getArgOperand(0);
6894 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6895 Check(RegCount % 8 == 0,
6896 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6897 break;
6898 }
6899 case Intrinsic::experimental_convergence_entry:
6900 case Intrinsic::experimental_convergence_anchor:
6901 break;
6902 case Intrinsic::experimental_convergence_loop:
6903 break;
6904 case Intrinsic::ptrmask: {
6905 Type *Ty0 = Call.getArgOperand(0)->getType();
6906 Type *Ty1 = Call.getArgOperand(1)->getType();
6908 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6909 "of pointers",
6910 &Call);
6911 Check(
6912 Ty0->isVectorTy() == Ty1->isVectorTy(),
6913 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6914 &Call);
6915 if (Ty0->isVectorTy())
6916 Check(cast<VectorType>(Ty0)->getElementCount() ==
6917 cast<VectorType>(Ty1)->getElementCount(),
6918 "llvm.ptrmask intrinsic arguments must have the same number of "
6919 "elements",
6920 &Call);
6921 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6922 "llvm.ptrmask intrinsic second argument bitwidth must match "
6923 "pointer index type size of first argument",
6924 &Call);
6925 break;
6926 }
6927 case Intrinsic::thread_pointer: {
6929 DL.getDefaultGlobalsAddressSpace(),
6930 "llvm.thread.pointer intrinsic return type must be for the globals "
6931 "address space",
6932 &Call);
6933 break;
6934 }
6935 case Intrinsic::threadlocal_address: {
6936 const Value &Arg0 = *Call.getArgOperand(0);
6937 Check(isa<GlobalValue>(Arg0),
6938 "llvm.threadlocal.address first argument must be a GlobalValue");
6939 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6940 "llvm.threadlocal.address operand isThreadLocal() must be true");
6941 break;
6942 }
6943 case Intrinsic::lifetime_start:
6944 case Intrinsic::lifetime_end: {
6947 "llvm.lifetime.start/end can only be used on alloca or poison",
6948 &Call);
6949 break;
6950 }
6951 };
6952
6953 // Verify that there aren't any unmediated control transfers between funclets.
6955 Function *F = Call.getParent()->getParent();
6956 if (F->hasPersonalityFn() &&
6957 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6958 // Run EH funclet coloring on-demand and cache results for other intrinsic
6959 // calls in this function
6960 if (BlockEHFuncletColors.empty())
6961 BlockEHFuncletColors = colorEHFunclets(*F);
6962
6963 // Check for catch-/cleanup-pad in first funclet block
6964 bool InEHFunclet = false;
6965 BasicBlock *CallBB = Call.getParent();
6966 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6967 assert(CV.size() > 0 && "Uncolored block");
6968 for (BasicBlock *ColorFirstBB : CV)
6969 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6970 It != ColorFirstBB->end())
6972 InEHFunclet = true;
6973
6974 // Check for funclet operand bundle
6975 bool HasToken = false;
6976 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6978 HasToken = true;
6979
6980 // This would cause silent code truncation in WinEHPrepare
6981 if (InEHFunclet)
6982 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6983 }
6984 }
6985}
6986
6987/// Carefully grab the subprogram from a local scope.
6988///
6989/// This carefully grabs the subprogram from a local scope, avoiding the
6990/// built-in assertions that would typically fire.
// NOTE(review): the defining line of this function (upstream line 6991) is
// absent from this excerpt. The body below takes a scope named `LocalScope`
// and returns a `DISubprogram *` — confirm the exact signature upstream.
6992 if (!LocalScope)
6993 return nullptr;
6994
6995 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6996 return SP;
6997
// Lexical blocks carry no subprogram themselves; walk the raw scope chain
// upward (recursively) until a DISubprogram is found or the chain ends.
6998 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6999 return getSubprogram(LB->getRawScope());
7000
7001 // Just return null; broken scope chains are checked elsewhere.
7002 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7003 return nullptr;
7004}
7005
/// Verify a #dbg_label record: its label operand and the agreement between
/// the label's scope and the record's !dbg attachment.
7006void Verifier::visit(DbgLabelRecord &DLR) {
// NOTE(review): the CheckDI condition line (upstream 7007) is missing from
// this excerpt; only its failure message below survives.
7008 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7009
7010 // Ignore broken !dbg attachments; they're checked elsewhere.
7011 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7012 if (!isa<DILocation>(N))
7013 return;
7014
7015 BasicBlock *BB = DLR.getParent();
7016 Function *F = BB ? BB->getParent() : nullptr;
7017
7018 // The scopes for variables and !dbg attachments must agree.
7019 DILabel *Label = DLR.getLabel();
7020 DILocation *Loc = DLR.getDebugLoc();
7021 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7022
// Resolve both scope chains to their enclosing subprograms; if either chain
// is broken, bail out quietly — broken chains are diagnosed elsewhere.
7023 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7024 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7025 if (!LabelSP || !LocSP)
7026 return;
7027
7028 CheckDI(LabelSP == LocSP,
7029 "mismatched subprogram between #dbg_label label and !dbg attachment",
7030 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7031 Loc->getScope()->getSubprogram());
7032}
7033
/// Verify a #dbg_value / #dbg_declare / #dbg_assign record: its kind, its
/// location/variable/expression operands, assign-specific operands, and the
/// agreement between the variable's scope and the record's DILocation.
7034void Verifier::visit(DbgVariableRecord &DVR) {
7035 BasicBlock *BB = DVR.getParent();
7036 Function *F = BB->getParent();
7037
7038 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7039 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7040 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7041 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7042
7043 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7044 // DIArgList, or an empty MDNode (which is a legacy representation for an
7045 // "undef" location).
7046 auto *MD = DVR.getRawLocation();
7047 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7048 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7049 "invalid #dbg record address/value", &DVR, MD, BB, F);
7050 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7051 visitValueAsMetadata(*VAM, F);
7052 if (DVR.isDbgDeclare()) {
7053 // Allow integers here to support inttoptr salvage.
7054 Type *Ty = VAM->getValue()->getType();
7055 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7056 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7057 F);
7058 }
7059 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7060 visitDIArgList(*AL, F);
7061 }
7062
// NOTE(review): the CheckDI condition line (upstream 7063) is missing from
// this excerpt; only its failure message survives below.
7064 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7065 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7066
// NOTE(review): the CheckDI condition line (upstream 7067) is missing here.
7068 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7069 F);
7070 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7071
7072 if (DVR.isDbgAssign()) {
// NOTE(review): the CheckDI condition line (upstream 7073) is missing here.
7074 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7075 F);
7076 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7077 AreDebugLocsAllowed::No);
7078
7079 const auto *RawAddr = DVR.getRawAddress();
7080 // Similarly to the location above, the address for an assign
7081 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7082 // represents an undef address.
7083 CheckDI(
7084 isa<ValueAsMetadata>(RawAddr) ||
7085 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7086 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7087 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7088 visitValueAsMetadata(*VAM, F);
7089
// NOTE(review): the CheckDI condition line (upstream 7090) is missing here.
7091 "invalid #dbg_assign address expression", &DVR,
7092 DVR.getRawAddressExpression(), BB, F);
7093 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7094
7095 // All of the linked instructions should be in the same function as DVR.
7096 for (Instruction *I : at::getAssignmentInsts(&DVR))
7097 CheckDI(DVR.getFunction() == I->getFunction(),
7098 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7099 }
7100
7101 // This check is redundant with one in visitLocalVariable().
7102 DILocalVariable *Var = DVR.getVariable();
7103 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7104 BB, F);
7105
7106 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7107 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7108 &DVR, DLNode, BB, F);
7109 DILocation *Loc = DVR.getDebugLoc();
7110
7111 // The scopes for variables and !dbg attachments must agree.
7112 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7113 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7114 if (!VarSP || !LocSP)
7115 return; // Broken scope chains are checked elsewhere.
7116
7117 CheckDI(VarSP == LocSP,
7118 "mismatched subprogram between #dbg record variable and DILocation",
7119 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7120 Loc->getScope()->getSubprogram(), BB, F);
7121
// Finally, check for duplicate/conflicting records describing fn arguments.
7122 verifyFnArgs(DVR);
7123}
7124
/// Verify vector-predicated (llvm.vp.*) intrinsics: element-count agreement
/// and element-type constraints for VP casts, plus per-intrinsic rules for
/// comparisons, fpclass tests, and experimental vp.splice.
7125void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7126 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7127 auto *RetTy = cast<VectorType>(VPCast->getType());
7128 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7129 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7130 "VP cast intrinsic first argument and result vector lengths must be "
7131 "equal",
7132 *VPCast);
7133
7134 switch (VPCast->getIntrinsicID()) {
7135 default:
7136 llvm_unreachable("Unknown VP cast intrinsic");
7137 case Intrinsic::vp_trunc:
7138 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7139 "llvm.vp.trunc intrinsic first argument and result element type "
7140 "must be integer",
7141 *VPCast);
7142 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7143 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7144 "larger than the bit size of the return type",
7145 *VPCast);
7146 break;
7147 case Intrinsic::vp_zext:
7148 case Intrinsic::vp_sext:
7149 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7150 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7151 "element type must be integer",
7152 *VPCast);
7153 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7154 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7155 "argument must be smaller than the bit size of the return type",
7156 *VPCast);
7157 break;
7158 case Intrinsic::vp_fptoui:
7159 case Intrinsic::vp_fptosi:
7160 case Intrinsic::vp_lrint:
7161 case Intrinsic::vp_llrint:
7162 Check(
7163 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
// NOTE(review): the two adjacent string literals below concatenate with no
// separating space, producing "...llvm.vp.llrintintrinsic..." in the
// diagnostic — likely a missing trailing space; confirm against upstream.
7164 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7165 "type must be floating-point and result element type must be integer",
7166 *VPCast);
7167 break;
7168 case Intrinsic::vp_uitofp:
7169 case Intrinsic::vp_sitofp:
7170 Check(
7171 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7172 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7173 "type must be integer and result element type must be floating-point",
7174 *VPCast);
7175 break;
7176 case Intrinsic::vp_fptrunc:
7177 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7178 "llvm.vp.fptrunc intrinsic first argument and result element type "
7179 "must be floating-point",
7180 *VPCast);
7181 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7182 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7183 "larger than the bit size of the return type",
7184 *VPCast);
7185 break;
7186 case Intrinsic::vp_fpext:
7187 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7188 "llvm.vp.fpext intrinsic first argument and result element type "
7189 "must be floating-point",
7190 *VPCast);
7191 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7192 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7193 "smaller than the bit size of the return type",
7194 *VPCast);
7195 break;
7196 case Intrinsic::vp_ptrtoint:
7197 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7198 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7199 "pointer and result element type must be integer",
7200 *VPCast);
7201 break;
7202 case Intrinsic::vp_inttoptr:
7203 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7204 "llvm.vp.inttoptr intrinsic first argument element type must be "
7205 "integer and result element type must be pointer",
7206 *VPCast);
7207 break;
7208 }
7209 }
7210
7211 switch (VPI.getIntrinsicID()) {
7212 case Intrinsic::vp_fcmp: {
7213 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
// NOTE(review): the Check condition line (upstream 7214) is missing from
// this excerpt; presumably it validates Pred as an FP predicate.
7215 "invalid predicate for VP FP comparison intrinsic", &VPI);
7216 break;
7217 }
7218 case Intrinsic::vp_icmp: {
7219 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
// NOTE(review): the Check condition line (upstream 7220) is missing from
// this excerpt; presumably it validates Pred as an integer predicate.
7221 "invalid predicate for VP integer comparison intrinsic", &VPI);
7222 break;
7223 }
7224 case Intrinsic::vp_is_fpclass: {
7225 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7226 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7227 "unsupported bits for llvm.vp.is.fpclass test mask");
7228 break;
7229 }
7230 case Intrinsic::experimental_vp_splice: {
7231 VectorType *VecTy = cast<VectorType>(VPI.getType());
7232 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7233 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
// For scalable vectors, scale the minimum element count by the function's
// vscale_range lower bound when one is present.
7234 if (VPI.getParent() && VPI.getParent()->getParent()) {
7235 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7236 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7237 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7238 }
7239 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7240 (Idx >= 0 && Idx < KnownMinNumElements),
7241 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7242 "known minimum number of elements in the vector. For scalable "
7243 "vectors the minimum number of elements is determined from "
7244 "vscale_range.",
7245 &VPI);
7246 break;
7247 }
7248 }
7249}
7250
/// Verify constrained FP intrinsics: the operand count (value operands plus
/// the expected metadata operands), per-intrinsic type rules, and that the
/// exception-behavior (and, when present, rounding-mode) metadata is valid.
7251void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7252 unsigned NumOperands = FPI.getNonMetadataArgCount();
7253 bool HasRoundingMD =
// NOTE(review): the initializer of HasRoundingMD (upstream line 7254) is
// missing from this excerpt; it presumably queries whether this intrinsic
// carries rounding-mode metadata — confirm upstream.
7255
7256 // Add the expected number of metadata operands.
7257 NumOperands += (1 + HasRoundingMD);
7258
7259 // Compare intrinsics carry an extra predicate metadata operand.
// NOTE(review): the condition guarding this increment (upstream line 7260)
// is missing from this excerpt.
7261 NumOperands += 1;
7262 Check((FPI.arg_size() == NumOperands),
7263 "invalid arguments for constrained FP intrinsic", &FPI);
7264
7265 switch (FPI.getIntrinsicID()) {
7266 case Intrinsic::experimental_constrained_lrint:
7267 case Intrinsic::experimental_constrained_llrint: {
7268 Type *ValTy = FPI.getArgOperand(0)->getType();
7269 Type *ResultTy = FPI.getType();
7270 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7271 "Intrinsic does not support vectors", &FPI);
7272 break;
7273 }
7274
7275 case Intrinsic::experimental_constrained_lround:
7276 case Intrinsic::experimental_constrained_llround: {
7277 Type *ValTy = FPI.getArgOperand(0)->getType();
7278 Type *ResultTy = FPI.getType();
7279 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7280 "Intrinsic does not support vectors", &FPI);
7281 break;
7282 }
7283
7284 case Intrinsic::experimental_constrained_fcmp:
7285 case Intrinsic::experimental_constrained_fcmps: {
7286 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
// NOTE(review): the Check condition line (upstream 7287) is missing from
// this excerpt; presumably it validates Pred as an FP predicate.
7288 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7289 break;
7290 }
7291
7292 case Intrinsic::experimental_constrained_fptosi:
7293 case Intrinsic::experimental_constrained_fptoui: {
7294 Value *Operand = FPI.getArgOperand(0);
7295 ElementCount SrcEC;
7296 Check(Operand->getType()->isFPOrFPVectorTy(),
7297 "Intrinsic first argument must be floating point", &FPI);
7298 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7299 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7300 }
7301
// From here on, `Operand` is re-pointed at the intrinsic itself so the same
// checks apply to the result type.
7302 Operand = &FPI;
7303 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7304 "Intrinsic first argument and result disagree on vector use", &FPI);
7305 Check(Operand->getType()->isIntOrIntVectorTy(),
7306 "Intrinsic result must be an integer", &FPI);
7307 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7308 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7309 "Intrinsic first argument and result vector lengths must be equal",
7310 &FPI);
7311 }
7312 break;
7313 }
7314
7315 case Intrinsic::experimental_constrained_sitofp:
7316 case Intrinsic::experimental_constrained_uitofp: {
7317 Value *Operand = FPI.getArgOperand(0);
7318 ElementCount SrcEC;
7319 Check(Operand->getType()->isIntOrIntVectorTy(),
7320 "Intrinsic first argument must be integer", &FPI);
7321 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7322 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7323 }
7324
// As above: reuse `Operand` to validate the result type.
7325 Operand = &FPI;
7326 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7327 "Intrinsic first argument and result disagree on vector use", &FPI);
7328 Check(Operand->getType()->isFPOrFPVectorTy(),
7329 "Intrinsic result must be a floating point", &FPI);
7330 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7331 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7332 "Intrinsic first argument and result vector lengths must be equal",
7333 &FPI);
7334 }
7335 break;
7336 }
7337
7338 case Intrinsic::experimental_constrained_fptrunc:
7339 case Intrinsic::experimental_constrained_fpext: {
7340 Value *Operand = FPI.getArgOperand(0);
7341 Type *OperandTy = Operand->getType();
7342 Value *Result = &FPI;
7343 Type *ResultTy = Result->getType();
7344 Check(OperandTy->isFPOrFPVectorTy(),
7345 "Intrinsic first argument must be FP or FP vector", &FPI);
7346 Check(ResultTy->isFPOrFPVectorTy(),
7347 "Intrinsic result must be FP or FP vector", &FPI);
7348 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7349 "Intrinsic first argument and result disagree on vector use", &FPI);
7350 if (OperandTy->isVectorTy()) {
7351 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7352 cast<VectorType>(ResultTy)->getElementCount(),
7353 "Intrinsic first argument and result vector lengths must be equal",
7354 &FPI);
7355 }
// fptrunc must narrow, fpext must widen.
7356 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7357 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7358 "Intrinsic first argument's type must be larger than result type",
7359 &FPI);
7360 } else {
7361 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7362 "Intrinsic first argument's type must be smaller than result type",
7363 &FPI);
7364 }
7365 break;
7366 }
7367
7368 default:
7369 break;
7370 }
7371
7372 // If a non-metadata argument is passed in a metadata slot then the
7373 // error will be caught earlier when the incorrect argument doesn't
7374 // match the specification in the intrinsic call table. Thus, no
7375 // argument type check is needed here.
7376
7377 Check(FPI.getExceptionBehavior().has_value(),
7378 "invalid exception behavior argument", &FPI);
7379 if (HasRoundingMD) {
7380 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7381 &FPI);
7382 }
7383}
7384
7385void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7386 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7387 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7388
7389 // We don't know whether this intrinsic verified correctly.
7390 if (!V || !E || !E->isValid())
7391 return;
7392
7393 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7394 auto Fragment = E->getFragmentInfo();
7395 if (!Fragment)
7396 return;
7397
7398 // The frontend helps out GDB by emitting the members of local anonymous
7399 // unions as artificial local variables with shared storage. When SROA splits
7400 // the storage for artificial local variables that are smaller than the entire
7401 // union, the overhang piece will be outside of the allotted space for the
7402 // variable and this check fails.
7403 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7404 if (V->isArtificial())
7405 return;
7406
7407 verifyFragmentExpression(*V, *Fragment, &DVR);
7408}
7409
/// Shared implementation of the fragment bounds check: the fragment described
/// by `Fragment` must fit inside variable `V` and must not cover all of it.
/// `Desc` is only used to label any diagnostic that is emitted.
7410template <typename ValueOrMetadata>
7411void Verifier::verifyFragmentExpression(const DIVariable &V,
// NOTE(review): the parameter line between these two (upstream 7412, the
// `Fragment` parameter — presumably `DIExpression::FragmentInfo Fragment,`)
// is missing from this excerpt.
7413 ValueOrMetadata *Desc) {
7414 // If there's no size, the type is broken, but that should be checked
7415 // elsewhere.
7416 auto VarSize = V.getSizeInBits();
7417 if (!VarSize)
7418 return;
7419
7420 unsigned FragSize = Fragment.SizeInBits;
7421 unsigned FragOffset = Fragment.OffsetInBits;
7422 CheckDI(FragSize + FragOffset <= *VarSize,
7423 "fragment is larger than or outside of variable", Desc, &V);
7424 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7425}
7426
7427void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7428 // This function does not take the scope of noninlined function arguments into
7429 // account. Don't run it if current function is nodebug, because it may
7430 // contain inlined debug intrinsics.
7431 if (!HasDebugInfo)
7432 return;
7433
7434 // For performance reasons only check non-inlined ones.
7435 if (DVR.getDebugLoc()->getInlinedAt())
7436 return;
7437
7438 DILocalVariable *Var = DVR.getVariable();
7439 CheckDI(Var, "#dbg record without variable");
7440
7441 unsigned ArgNo = Var->getArg();
7442 if (!ArgNo)
7443 return;
7444
7445 // Verify there are no duplicate function argument debug info entries.
7446 // These will cause hard-to-debug assertions in the DWARF backend.
7447 if (DebugFnArgs.size() < ArgNo)
7448 DebugFnArgs.resize(ArgNo, nullptr);
7449
7450 auto *Prev = DebugFnArgs[ArgNo - 1];
7451 DebugFnArgs[ArgNo - 1] = Var;
7452 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7453 Prev, Var);
7454}
7455
/// Reject DW_OP_LLVM_entry_value expressions in IR; they are only legal in
/// MIR, with a carve-out for swiftasync arguments.
7456void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7457 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7458
7459 // We don't know whether this intrinsic verified correctly.
7460 if (!E || !E->isValid())
7461 return;
7462
// NOTE(review): the opening of this block (upstream line 7463, presumably an
// `if (E->isEntryValue() ...) {` guard) is missing from this excerpt; the
// closing `}` below (7472) pairs with it.
7464 Value *VarValue = DVR.getVariableLocationOp(0);
7465 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7466 return;
7467 // We allow EntryValues for swift async arguments, as they have an
7468 // ABI-guarantee to be turned into a specific register.
7469 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7470 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7471 return;
7472 }
7473
7474 CheckDI(!E->isEntryValue(),
7475 "Entry values are only allowed in MIR unless they target a "
7476 "swiftasync Argument",
7477 &DVR);
7478}
7479
7480void Verifier::verifyCompileUnits() {
7481 // When more than one Module is imported into the same context, such as during
7482 // an LTO build before linking the modules, ODR type uniquing may cause types
7483 // to point to a different CU. This check does not make sense in this case.
7484 if (M.getContext().isODRUniquingDebugTypes())
7485 return;
7486 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7487 SmallPtrSet<const Metadata *, 2> Listed;
7488 if (CUs)
7489 Listed.insert_range(CUs->operands());
7490 for (const auto *CU : CUVisited)
7491 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7492 CUVisited.clear();
7493}
7494
7495void Verifier::verifyDeoptimizeCallingConvs() {
7496 if (DeoptimizeDeclarations.empty())
7497 return;
7498
7499 const Function *First = DeoptimizeDeclarations[0];
7500 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7501 Check(First->getCallingConv() == F->getCallingConv(),
7502 "All llvm.experimental.deoptimize declarations must have the same "
7503 "calling convention",
7504 First, F);
7505 }
7506}
7507
7508void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7509 const OperandBundleUse &BU) {
7510 FunctionType *FTy = Call.getFunctionType();
7511
7512 Check((FTy->getReturnType()->isPointerTy() ||
7513 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7514 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7515 "function returning a pointer or a non-returning function that has a "
7516 "void return type",
7517 Call);
7518
7519 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7520 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7521 "an argument",
7522 Call);
7523
7524 auto *Fn = cast<Function>(BU.Inputs.front());
7525 Intrinsic::ID IID = Fn->getIntrinsicID();
7526
7527 if (IID) {
7528 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7529 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7530 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7531 "invalid function argument", Call);
7532 } else {
7533 StringRef FnName = Fn->getName();
7534 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7535 FnName == "objc_claimAutoreleasedReturnValue" ||
7536 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7537 "invalid function argument", Call);
7538 }
7539}
7540
/// Verify llvm.experimental.noalias.scope.decl calls: each must carry a
/// single-scope !id.scope.list, and (optionally) no two declarations of the
/// same scope may dominate one another.
7541void Verifier::verifyNoAliasScopeDecl() {
7542 if (NoAliasScopeDecls.empty())
7543 return;
7544
7545 // only a single scope must be declared at a time.
7546 for (auto *II : NoAliasScopeDecls) {
7547 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7548 "Not a llvm.experimental.noalias.scope.decl ?");
7549 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
// NOTE(review): the argument expression of this dyn_cast (upstream line
// 7550) is missing from this excerpt.
7551 Check(ScopeListMV != nullptr,
7552 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7553 "argument",
7554 II);
7555
7556 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7557 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7558 Check(ScopeListMD->getNumOperands() == 1,
7559 "!id.scope.list must point to a list with a single scope", II);
7560 visitAliasScopeListMetadata(ScopeListMD);
7561 }
7562
7563 // Only check the domination rule when requested. Once all passes have been
7564 // adapted this option can go away.
// NOTE(review): the condition guarding this early return (upstream line
// 7565) is missing from this excerpt.
7566 return;
7567
7568 // Now sort the intrinsics based on the scope MDNode so that declarations of
7569 // the same scopes are next to each other.
7570 auto GetScope = [](IntrinsicInst *II) {
7571 const auto *ScopeListMV = cast<MetadataAsValue>(
// NOTE(review): the argument expression of this cast (upstream line 7572)
// is missing from this excerpt.
7573 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7574 };
7575
7576 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7577 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7578 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7579 return GetScope(Lhs) < GetScope(Rhs);
7580 };
7581
7582 llvm::sort(NoAliasScopeDecls, Compare);
7583
7584 // Go over the intrinsics and check that for the same scope, they are not
7585 // dominating each other.
7586 auto ItCurrent = NoAliasScopeDecls.begin();
7587 while (ItCurrent != NoAliasScopeDecls.end()) {
7588 auto CurScope = GetScope(*ItCurrent);
7589 auto ItNext = ItCurrent;
7590 do {
7591 ++ItNext;
7592 } while (ItNext != NoAliasScopeDecls.end() &&
7593 GetScope(*ItNext) == CurScope);
7594
7595 // [ItCurrent, ItNext) represents the declarations for the same scope.
7596 // Ensure they are not dominating each other.. but only if it is not too
7597 // expensive.
7598 if (ItNext - ItCurrent < 32)
7599 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7600 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7601 if (I != J)
7602 Check(!DT.dominates(I, J),
7603 "llvm.experimental.noalias.scope.decl dominates another one "
7604 "with the same scope",
7605 I);
7606 ItCurrent = ItNext;
7607 }
7608}
7609
7610//===----------------------------------------------------------------------===//
7611// Implement the public interfaces to this file...
7612//===----------------------------------------------------------------------===//
7613
// NOTE(review): the signature line of this definition (upstream line 7614,
// presumably `bool llvm::verifyFunction(const Function &f, raw_ostream *OS)`)
// is missing from this excerpt.
7615 Function &F = const_cast<Function &>(f);
7616
7617 // Don't use a raw_null_ostream. Printing IR is expensive.
7618 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7619
7620 // Note that this function's return value is inverted from what you would
7621 // expect of a function called "verify".
7622 return !V.verify(F);
7623}
7624
// NOTE(review): the first signature line of this definition (upstream line
// 7625, presumably `bool llvm::verifyModule(const Module &M, raw_ostream
// *OS,`) is missing from this excerpt; the continuation below supplies the
// trailing `bool *BrokenDebugInfo` parameter.
7626 bool *BrokenDebugInfo) {
7627 // Don't use a raw_null_ostream. Printing IR is expensive.
// Broken debug info is only fatal when the caller gave us no out-parameter
// to report it through.
7628 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7629
7630 bool Broken = false;
7631 for (const Function &F : M)
7632 Broken |= !V.verify(F);
7633
7634 Broken |= !V.verify();
7635 if (BrokenDebugInfo)
7636 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7637 // Note that this function's return value is inverted from what you would
7638 // expect of a function called "verify".
7639 return Broken;
7640}
7641
7642namespace {
7643
7644struct VerifierLegacyPass : public FunctionPass {
7645 static char ID;
7646
7647 std::unique_ptr<Verifier> V;
7648 bool FatalErrors = true;
7649
7650 VerifierLegacyPass() : FunctionPass(ID) {
7652 }
7653 explicit VerifierLegacyPass(bool FatalErrors)
7654 : FunctionPass(ID),
7655 FatalErrors(FatalErrors) {
7657 }
7658
7659 bool doInitialization(Module &M) override {
7660 V = std::make_unique<Verifier>(
7661 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7662 return false;
7663 }
7664
7665 bool runOnFunction(Function &F) override {
7666 if (!V->verify(F) && FatalErrors) {
7667 errs() << "in function " << F.getName() << '\n';
7668 report_fatal_error("Broken function found, compilation aborted!");
7669 }
7670 return false;
7671 }
7672
7673 bool doFinalization(Module &M) override {
7674 bool HasErrors = false;
7675 for (Function &F : M)
7676 if (F.isDeclaration())
7677 HasErrors |= !V->verify(F);
7678
7679 HasErrors |= !V->verify();
7680 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7681 report_fatal_error("Broken module found, compilation aborted!");
7682 return false;
7683 }
7684
7685 void getAnalysisUsage(AnalysisUsage &AU) const override {
7686 AU.setPreservesAll();
7687 }
7688};
7689
7690} // end anonymous namespace
7691
7692/// Helper to issue failure from the TBAA verification
7693template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7694 if (Diagnostic)
7695 return Diagnostic->CheckFailed(Args...);
7696}
7697
/// Evaluate condition \p C; if it fails, forward the remaining arguments to
/// TBAAVerifier::CheckFailed and return false from the enclosing function.
#define CheckTBAA(C, ...) \
  do { \
    if (!(C)) { \
      CheckFailed(__VA_ARGS__); \
      return false; \
    } \
  } while (false)
7705
7706/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7707/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7708/// struct-type node describing an aggregate data structure (like a struct).
7709TBAAVerifier::TBAABaseNodeSummary
7710TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7711 bool IsNewFormat) {
7712 if (BaseNode->getNumOperands() < 2) {
7713 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7714 return {true, ~0u};
7715 }
7716
7717 auto Itr = TBAABaseNodes.find(BaseNode);
7718 if (Itr != TBAABaseNodes.end())
7719 return Itr->second;
7720
7721 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7722 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7723 (void)InsertResult;
7724 assert(InsertResult.second && "We just checked!");
7725 return Result;
7726}
7727
7728TBAAVerifier::TBAABaseNodeSummary
7729TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7730 const MDNode *BaseNode, bool IsNewFormat) {
7731 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7732
7733 if (BaseNode->getNumOperands() == 2) {
7734 // Scalar nodes can only be accessed at offset 0.
7735 return isValidScalarTBAANode(BaseNode)
7736 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7737 : InvalidNode;
7738 }
7739
7740 if (IsNewFormat) {
7741 if (BaseNode->getNumOperands() % 3 != 0) {
7742 CheckFailed("Access tag nodes must have the number of operands that is a "
7743 "multiple of 3!", BaseNode);
7744 return InvalidNode;
7745 }
7746 } else {
7747 if (BaseNode->getNumOperands() % 2 != 1) {
7748 CheckFailed("Struct tag nodes must have an odd number of operands!",
7749 BaseNode);
7750 return InvalidNode;
7751 }
7752 }
7753
7754 // Check the type size field.
7755 if (IsNewFormat) {
7756 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7757 BaseNode->getOperand(1));
7758 if (!TypeSizeNode) {
7759 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7760 return InvalidNode;
7761 }
7762 }
7763
7764 // Check the type name field. In the new format it can be anything.
7765 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7766 CheckFailed("Struct tag nodes have a string as their first operand",
7767 BaseNode);
7768 return InvalidNode;
7769 }
7770
7771 bool Failed = false;
7772
7773 std::optional<APInt> PrevOffset;
7774 unsigned BitWidth = ~0u;
7775
7776 // We've already checked that BaseNode is not a degenerate root node with one
7777 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7778 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7779 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7780 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7781 Idx += NumOpsPerField) {
7782 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7783 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7784 if (!isa<MDNode>(FieldTy)) {
7785 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7786 Failed = true;
7787 continue;
7788 }
7789
7790 auto *OffsetEntryCI =
7792 if (!OffsetEntryCI) {
7793 CheckFailed("Offset entries must be constants!", I, BaseNode);
7794 Failed = true;
7795 continue;
7796 }
7797
7798 if (BitWidth == ~0u)
7799 BitWidth = OffsetEntryCI->getBitWidth();
7800
7801 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7802 CheckFailed(
7803 "Bitwidth between the offsets and struct type entries must match", I,
7804 BaseNode);
7805 Failed = true;
7806 continue;
7807 }
7808
7809 // NB! As far as I can tell, we generate a non-strictly increasing offset
7810 // sequence only from structs that have zero size bit fields. When
7811 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7812 // pick the field lexically the latest in struct type metadata node. This
7813 // mirrors the actual behavior of the alias analysis implementation.
7814 bool IsAscending =
7815 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7816
7817 if (!IsAscending) {
7818 CheckFailed("Offsets must be increasing!", I, BaseNode);
7819 Failed = true;
7820 }
7821
7822 PrevOffset = OffsetEntryCI->getValue();
7823
7824 if (IsNewFormat) {
7825 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7826 BaseNode->getOperand(Idx + 2));
7827 if (!MemberSizeNode) {
7828 CheckFailed("Member size entries must be constants!", I, BaseNode);
7829 Failed = true;
7830 continue;
7831 }
7832 }
7833 }
7834
7835 return Failed ? InvalidNode
7836 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7837}
7838
7839static bool IsRootTBAANode(const MDNode *MD) {
7840 return MD->getNumOperands() < 2;
7841}
7842
7843static bool IsScalarTBAANodeImpl(const MDNode *MD,
7845 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7846 return false;
7847
7848 if (!isa<MDString>(MD->getOperand(0)))
7849 return false;
7850
7851 if (MD->getNumOperands() == 3) {
7853 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7854 return false;
7855 }
7856
7857 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7858 return Parent && Visited.insert(Parent).second &&
7859 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7860}
7861
7862bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7863 auto ResultIt = TBAAScalarNodes.find(MD);
7864 if (ResultIt != TBAAScalarNodes.end())
7865 return ResultIt->second;
7866
7867 SmallPtrSet<const MDNode *, 4> Visited;
7868 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7869 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7870 (void)InsertResult;
7871 assert(InsertResult.second && "Just checked!");
7872
7873 return Result;
7874}
7875
7876/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7877/// Offset in place to be the offset within the field node returned.
7878///
7879/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Struct-path layout: fields start at operand 1 (old format) or 3 (new
  // format), each occupying 2 (type, offset) or 3 (type, offset, size)
  // operands respectively.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    // Offsets are (non-strictly) increasing, so the access lands in the last
    // field whose offset is <= Offset, i.e. the one before the first field
    // whose offset exceeds Offset.
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == FirstFieldOpNo) {
        // Even the first field starts past Offset -- no containing field.
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      // Rebase Offset to be relative to the start of the chosen field.
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Every field offset is <= Offset, so the access lands in the last field.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
7919
7921 if (!Type || Type->getNumOperands() < 3)
7922 return false;
7923
7924 // In the new format type nodes shall have a reference to the parent type as
7925 // its first operand.
7926 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7927}
7928
7930 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7931 MD);
7932
7933 if (I)
7937 "This instruction shall not have a TBAA access tag!", I);
7938
7939 bool IsStructPathTBAA =
7940 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7941
7942 CheckTBAA(IsStructPathTBAA,
7943 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7944 I);
7945
7946 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7947 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7948
7949 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7950
7951 if (IsNewFormat) {
7952 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7953 "Access tag metadata must have either 4 or 5 operands", I, MD);
7954 } else {
7955 CheckTBAA(MD->getNumOperands() < 5,
7956 "Struct tag metadata must have either 3 or 4 operands", I, MD);
7957 }
7958
7959 // Check the access size field.
7960 if (IsNewFormat) {
7961 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7962 MD->getOperand(3));
7963 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
7964 }
7965
7966 // Check the immutability flag.
7967 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7968 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7969 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7970 MD->getOperand(ImmutabilityFlagOpNo));
7971 CheckTBAA(IsImmutableCI,
7972 "Immutability tag on struct tag metadata must be a constant", I,
7973 MD);
7974 CheckTBAA(
7975 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7976 "Immutability part of the struct tag metadata must be either 0 or 1", I,
7977 MD);
7978 }
7979
7980 CheckTBAA(BaseNode && AccessType,
7981 "Malformed struct tag metadata: base and access-type "
7982 "should be non-null and point to Metadata nodes",
7983 I, MD, BaseNode, AccessType);
7984
7985 if (!IsNewFormat) {
7986 CheckTBAA(isValidScalarTBAANode(AccessType),
7987 "Access type node must be a valid scalar type", I, MD,
7988 AccessType);
7989 }
7990
7992 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
7993
7994 APInt Offset = OffsetCI->getValue();
7995 bool SeenAccessTypeInPath = false;
7996
7997 SmallPtrSet<MDNode *, 4> StructPath;
7998
7999 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8000 BaseNode =
8001 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8002 if (!StructPath.insert(BaseNode).second) {
8003 CheckFailed("Cycle detected in struct path", I, MD);
8004 return false;
8005 }
8006
8007 bool Invalid;
8008 unsigned BaseNodeBitWidth;
8009 std::tie(Invalid, BaseNodeBitWidth) =
8010 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8011
8012 // If the base node is invalid in itself, then we've already printed all the
8013 // errors we wanted to print.
8014 if (Invalid)
8015 return false;
8016
8017 SeenAccessTypeInPath |= BaseNode == AccessType;
8018
8019 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8020 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8021 MD, &Offset);
8022
8023 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8024 (BaseNodeBitWidth == 0 && Offset == 0) ||
8025 (IsNewFormat && BaseNodeBitWidth == ~0u),
8026 "Access bit-width not the same as description bit-width", I, MD,
8027 BaseNodeBitWidth, Offset.getBitWidth());
8028
8029 if (IsNewFormat && SeenAccessTypeInPath)
8030 break;
8031 }
8032
8033 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8034 MD);
8035 return true;
8036}
8037
8038char VerifierLegacyPass::ID = 0;
8039INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8040
8042 return new VerifierLegacyPass(FatalErrors);
8043}
8044
8045AnalysisKey VerifierAnalysis::Key;
8052
8057
8059 auto Res = AM.getResult<VerifierAnalysis>(M);
8060 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8061 report_fatal_error("Broken module found, compilation aborted!");
8062
8063 return PreservedAnalyses::all();
8064}
8065
8067 auto res = AM.getResult<VerifierAnalysis>(F);
8068 if (res.IRBroken && FatalErrors)
8069 report_fatal_error("Broken function found, compilation aborted!");
8070
8071 return PreservedAnalyses::all();
8072}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:682
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:723
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1459
bool isNegative() const
Definition APFloat.h:1449
const fltSemantics & getSemantics() const
Definition APFloat.h:1457
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:399
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:784
bool isIntPredicate() const
Definition InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
static constexpr size_t npos
Definition StringRef.h:57
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1058
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:811
@ DW_MACINFO_start_file
Definition Dwarf.h:812
@ DW_MACINFO_define
Definition Dwarf.h:810
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2474
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2138
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:677
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1899
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:286
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:313
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144