LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
 30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "assign";
197 break;
199 *OS << "end";
200 break;
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
  /// Unwrap a typed MD-tuple array and print the underlying tuple node.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
  /// Print a heterogeneous pack of values, one Write() overload call per
  /// element, recursing through the pack.
  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  /// Recursion terminator for the empty pack.
  template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so printout out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
337 SmallPtrSet<const Metadata *, 32> MDNodes;
338
339 /// Keep track which DISubprogram is attached to which function.
340 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
341
342 /// Track all DICompileUnits visited.
343 SmallPtrSet<const Metadata *, 2> CUVisited;
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
357 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
361 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
  /// Construct a verifier bound to module \p M. Diagnostics are written to
  /// \p OS (may be null to suppress output);
  /// \p ShouldTreatBrokenDebugInfoAsError controls whether broken debug info
  /// is treated as an error.
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
399
400 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
  /// Verify a single function of the bound module; returns false if the
  /// function is broken. Per-function state is reset at the end so the same
  /// instance can verify the next function.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.
    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // Bail out early: every block must end in a terminator before any
    // deeper analysis (dominance, visitation) makes sense.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    // Route convergence-verifier failures through the common failure path.
    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    // Convergence tokens are only verified when some were actually seen.
    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state for the next call.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
452
453 /// Verify the module that this instance of \c Verifier was initialized with.
  /// Verify the module that this instance of \c Verifier was initialized
  /// with: globals, aliases, ifuncs, named metadata, comdats, module flags,
  /// and module-level debug info. Returns false if the module is broken.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    // Comdats live in the module's symbol table rather than a global list.
    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();

    // Module-level debug-info consistency (all DICompileUnits accounted for).
    verifyCompileUnits();

    // All llvm.experimental.deoptimize declarations must share one CC.
    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleFlags();
520 void visitModuleFlag(const MDNode *Op,
521 DenseMap<const MDString *, const MDNode *> &SeenIDs,
522 SmallVectorImpl<const MDNode *> &Requirements);
523 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
524 void visitFunction(const Function &F);
525 void visitBasicBlock(BasicBlock &BB);
526 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
527 RangeLikeMetadataKind Kind);
528 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
529 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
531 void visitNofreeMetadata(Instruction &I, MDNode *MD);
532 void visitProfMetadata(Instruction &I, MDNode *MD);
533 void visitCallStackMetadata(MDNode *MD);
534 void visitMemProfMetadata(Instruction &I, MDNode *MD);
535 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
536 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
537 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
538 void visitMMRAMetadata(Instruction &I, MDNode *MD);
539 void visitAnnotationMetadata(MDNode *Annotation);
540 void visitAliasScopeMetadata(const MDNode *MD);
541 void visitAliasScopeListMetadata(const MDNode *MD);
542 void visitAccessGroupMetadata(const MDNode *MD);
543
544 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
545#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
546#include "llvm/IR/Metadata.def"
547 void visitDIScope(const DIScope &N);
548 void visitDIVariable(const DIVariable &N);
549 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
550 void visitDITemplateParameter(const DITemplateParameter &N);
551
552 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
553
554 void visit(DbgLabelRecord &DLR);
555 void visit(DbgVariableRecord &DVR);
556 // InstVisitor overrides...
557 using InstVisitor<Verifier>::visit;
558 void visitDbgRecords(Instruction &I);
559 void visit(Instruction &I);
560
561 void visitTruncInst(TruncInst &I);
562 void visitZExtInst(ZExtInst &I);
563 void visitSExtInst(SExtInst &I);
564 void visitFPTruncInst(FPTruncInst &I);
565 void visitFPExtInst(FPExtInst &I);
566 void visitFPToUIInst(FPToUIInst &I);
567 void visitFPToSIInst(FPToSIInst &I);
568 void visitUIToFPInst(UIToFPInst &I);
569 void visitSIToFPInst(SIToFPInst &I);
570 void visitIntToPtrInst(IntToPtrInst &I);
571 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
572 void visitPtrToAddrInst(PtrToAddrInst &I);
573 void visitPtrToIntInst(PtrToIntInst &I);
574 void visitBitCastInst(BitCastInst &I);
575 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
576 void visitPHINode(PHINode &PN);
577 void visitCallBase(CallBase &Call);
578 void visitUnaryOperator(UnaryOperator &U);
579 void visitBinaryOperator(BinaryOperator &B);
580 void visitICmpInst(ICmpInst &IC);
581 void visitFCmpInst(FCmpInst &FC);
582 void visitExtractElementInst(ExtractElementInst &EI);
583 void visitInsertElementInst(InsertElementInst &EI);
584 void visitShuffleVectorInst(ShuffleVectorInst &EI);
585 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
586 void visitCallInst(CallInst &CI);
587 void visitInvokeInst(InvokeInst &II);
588 void visitGetElementPtrInst(GetElementPtrInst &GEP);
589 void visitLoadInst(LoadInst &LI);
590 void visitStoreInst(StoreInst &SI);
591 void verifyDominatesUse(Instruction &I, unsigned i);
592 void visitInstruction(Instruction &I);
593 void visitTerminator(Instruction &I);
594 void visitBranchInst(BranchInst &BI);
595 void visitReturnInst(ReturnInst &RI);
596 void visitSwitchInst(SwitchInst &SI);
597 void visitIndirectBrInst(IndirectBrInst &BI);
598 void visitCallBrInst(CallBrInst &CBI);
599 void visitSelectInst(SelectInst &SI);
600 void visitUserOp1(Instruction &I);
601 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
602 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
603 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
604 void visitVPIntrinsic(VPIntrinsic &VPI);
605 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
606 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
607 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
608 void visitFenceInst(FenceInst &FI);
609 void visitAllocaInst(AllocaInst &AI);
610 void visitExtractValueInst(ExtractValueInst &EVI);
611 void visitInsertValueInst(InsertValueInst &IVI);
612 void visitEHPadPredecessors(Instruction &I);
613 void visitLandingPadInst(LandingPadInst &LPI);
614 void visitResumeInst(ResumeInst &RI);
615 void visitCatchPadInst(CatchPadInst &CPI);
616 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
617 void visitCleanupPadInst(CleanupPadInst &CPI);
618 void visitFuncletPadInst(FuncletPadInst &FPI);
619 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
620 void visitCleanupReturnInst(CleanupReturnInst &CRI);
621
622 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
623 void verifySwiftErrorValue(const Value *SwiftErrorVal);
624 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
625 void verifyMustTailCall(CallInst &CI);
626 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
627 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
628 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
629 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
630 const Value *V);
631 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
632 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
633 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
634
635 void visitConstantExprsRecursively(const Constant *EntryC);
636 void visitConstantExpr(const ConstantExpr *CE);
637 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
638 void verifyInlineAsmCall(const CallBase &Call);
639 void verifyStatepoint(const CallBase &Call);
640 void verifyFrameRecoverIndices();
641 void verifySiblingFuncletUnwinds();
642
643 void verifyFragmentExpression(const DbgVariableRecord &I);
644 template <typename ValueOrMetadata>
645 void verifyFragmentExpression(const DIVariable &V,
647 ValueOrMetadata *Desc);
648 void verifyFnArgs(const DbgVariableRecord &DVR);
649 void verifyNotEntryValue(const DbgVariableRecord &I);
650
651 /// Module-level debug info verification...
652 void verifyCompileUnits();
653
654 /// Module-level verification that all @llvm.experimental.deoptimize
655 /// declarations share the same calling convention.
656 void verifyDeoptimizeCallingConvs();
657
658 void verifyAttachedCallBundle(const CallBase &Call,
659 const OperandBundleUse &BU);
660
661 /// Verify the llvm.experimental.noalias.scope.decl declarations
662 void verifyNoAliasScopeDecl();
663};
664
665} // end anonymous namespace
666
667/// We know that cond should be true, if not print an error message.
668#define Check(C, ...) \
669 do { \
670 if (!(C)) { \
671 CheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
675
676/// We know that a debug info condition should be true, if not print
677/// an error message.
678#define CheckDI(C, ...) \
679 do { \
680 if (!(C)) { \
681 DebugInfoCheckFailed(__VA_ARGS__); \
682 return; \
683 } \
684 } while (false)
685
686void Verifier::visitDbgRecords(Instruction &I) {
687 if (!I.DebugMarker)
688 return;
689 CheckDI(I.DebugMarker->MarkedInstr == &I,
690 "Instruction has invalid DebugMarker", &I);
691 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
692 "PHI Node must not have any attached DbgRecords", &I);
693 for (DbgRecord &DR : I.getDbgRecordRange()) {
694 CheckDI(DR.getMarker() == I.DebugMarker,
695 "DbgRecord had invalid DebugMarker", &I, &DR);
696 if (auto *Loc =
698 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
699 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
700 visit(*DVR);
701 // These have to appear after `visit` for consistency with existing
702 // intrinsic behaviour.
703 verifyFragmentExpression(*DVR);
704 verifyNotEntryValue(*DVR);
705 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
706 visit(*DLR);
707 }
708 }
709}
710
711void Verifier::visit(Instruction &I) {
712 visitDbgRecords(I);
713 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
714 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
716}
717
718// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
719static void forEachUser(const Value *User,
721 llvm::function_ref<bool(const Value *)> Callback) {
722 if (!Visited.insert(User).second)
723 return;
724
726 while (!WorkList.empty()) {
727 const Value *Cur = WorkList.pop_back_val();
728 if (!Visited.insert(Cur).second)
729 continue;
730 if (Callback(Cur))
731 append_range(WorkList, Cur->materialized_users());
732 }
733}
734
735void Verifier::visitGlobalValue(const GlobalValue &GV) {
737 "Global is external, but doesn't have external or weak linkage!", &GV);
738
739 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
740 if (const MDNode *Associated =
741 GO->getMetadata(LLVMContext::MD_associated)) {
742 Check(Associated->getNumOperands() == 1,
743 "associated metadata must have one operand", &GV, Associated);
744 const Metadata *Op = Associated->getOperand(0).get();
745 Check(Op, "associated metadata must have a global value", GO, Associated);
746
747 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
748 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
749 if (VM) {
750 Check(isa<PointerType>(VM->getValue()->getType()),
751 "associated value must be pointer typed", GV, Associated);
752
753 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
754 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
755 "associated metadata must point to a GlobalObject", GO, Stripped);
756 Check(Stripped != GO,
757 "global values should not associate to themselves", GO,
758 Associated);
759 }
760 }
761
762 // FIXME: Why is getMetadata on GlobalValue protected?
763 if (const MDNode *AbsoluteSymbol =
764 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
765 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
766 DL.getIntPtrType(GO->getType()),
767 RangeLikeMetadataKind::AbsoluteSymbol);
768 }
769 }
770
772 "Only global variables can have appending linkage!", &GV);
773
774 if (GV.hasAppendingLinkage()) {
775 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
776 Check(GVar && GVar->getValueType()->isArrayTy(),
777 "Only global arrays can have appending linkage!", GVar);
778 }
779
780 if (GV.isDeclarationForLinker())
781 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
782
783 if (GV.hasDLLExportStorageClass()) {
785 "dllexport GlobalValue must have default or protected visibility",
786 &GV);
787 }
788 if (GV.hasDLLImportStorageClass()) {
790 "dllimport GlobalValue must have default visibility", &GV);
791 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
792 &GV);
793
794 Check((GV.isDeclaration() &&
797 "Global is marked as dllimport, but not external", &GV);
798 }
799
800 if (GV.isImplicitDSOLocal())
801 Check(GV.isDSOLocal(),
802 "GlobalValue with local linkage or non-default "
803 "visibility must be dso_local!",
804 &GV);
805
806 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
807 if (const Instruction *I = dyn_cast<Instruction>(V)) {
808 if (!I->getParent() || !I->getParent()->getParent())
809 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
810 I);
811 else if (I->getParent()->getParent()->getParent() != &M)
812 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
813 I->getParent()->getParent(),
814 I->getParent()->getParent()->getParent());
815 return false;
816 } else if (const Function *F = dyn_cast<Function>(V)) {
817 if (F->getParent() != &M)
818 CheckFailed("Global is used by function in a different module", &GV, &M,
819 F, F->getParent());
820 return false;
821 }
822 return true;
823 });
824}
825
826void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
827 Type *GVType = GV.getValueType();
828
829 if (MaybeAlign A = GV.getAlign()) {
830 Check(A->value() <= Value::MaximumAlignment,
831 "huge alignment values are unsupported", &GV);
832 }
833
834 if (GV.hasInitializer()) {
835 Check(GV.getInitializer()->getType() == GVType,
836 "Global variable initializer type does not match global "
837 "variable type!",
838 &GV);
840 "Global variable initializer must be sized", &GV);
841 visitConstantExprsRecursively(GV.getInitializer());
842 // If the global has common linkage, it must have a zero initializer and
843 // cannot be constant.
844 if (GV.hasCommonLinkage()) {
846 "'common' global must have a zero initializer!", &GV);
847 Check(!GV.isConstant(), "'common' global may not be marked constant!",
848 &GV);
849 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
850 }
851 }
852
853 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
854 GV.getName() == "llvm.global_dtors")) {
856 "invalid linkage for intrinsic global variable", &GV);
858 "invalid uses of intrinsic global variable", &GV);
859
860 // Don't worry about emitting an error for it not being an array,
861 // visitGlobalValue will complain on appending non-array.
862 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
863 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
864 PointerType *FuncPtrTy =
865 PointerType::get(Context, DL.getProgramAddressSpace());
866 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
867 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
868 STy->getTypeAtIndex(1) == FuncPtrTy,
869 "wrong type for intrinsic global variable", &GV);
870 Check(STy->getNumElements() == 3,
871 "the third field of the element type is mandatory, "
872 "specify ptr null to migrate from the obsoleted 2-field form");
873 Type *ETy = STy->getTypeAtIndex(2);
874 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
875 &GV);
876 }
877 }
878
879 if (GV.hasName() && (GV.getName() == "llvm.used" ||
880 GV.getName() == "llvm.compiler.used")) {
882 "invalid linkage for intrinsic global variable", &GV);
884 "invalid uses of intrinsic global variable", &GV);
885
886 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
887 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
888 Check(PTy, "wrong type for intrinsic global variable", &GV);
889 if (GV.hasInitializer()) {
890 const Constant *Init = GV.getInitializer();
891 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
892 Check(InitArray, "wrong initalizer for intrinsic global variable",
893 Init);
894 for (Value *Op : InitArray->operands()) {
895 Value *V = Op->stripPointerCasts();
898 Twine("invalid ") + GV.getName() + " member", V);
899 Check(V->hasName(),
900 Twine("members of ") + GV.getName() + " must be named", V);
901 }
902 }
903 }
904 }
905
906 // Visit any debug info attachments.
908 GV.getMetadata(LLVMContext::MD_dbg, MDs);
909 for (auto *MD : MDs) {
910 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
911 visitDIGlobalVariableExpression(*GVE);
912 else
913 CheckDI(false, "!dbg attachment of global variable must be a "
914 "DIGlobalVariableExpression");
915 }
916
917 // Scalable vectors cannot be global variables, since we don't know
918 // the runtime size.
919 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
920
921 // Check if it is or contains a target extension type that disallows being
922 // used as a global.
924 "Global @" + GV.getName() + " has illegal target extension type",
925 GVType);
926
927 if (!GV.hasInitializer()) {
928 visitGlobalValue(GV);
929 return;
930 }
931
932 // Walk any aggregate initializers looking for bitcasts between address spaces
933 visitConstantExprsRecursively(GV.getInitializer());
934
935 visitGlobalValue(GV);
936}
937
938void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
939 SmallPtrSet<const GlobalAlias*, 4> Visited;
940 Visited.insert(&GA);
941 visitAliaseeSubExpr(Visited, GA, C);
942}
943
// Recursively verify the constant expression an alias points at.
// Visited records every GlobalAlias seen along the current chain so that
// alias cycles are diagnosed instead of recursing forever.
// NOTE(review): a few lines of this function are not visible in this excerpt
// (the guards opening the first two Check calls); the orphan continuation
// fragments below belong to them.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
        cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // Aliases are resolved at link time, so the target must be defined
      // in this module (not merely a declaration for the linker).
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Insertion failure means we already saw this alias on the chain.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through the operands: nested aliases restart from their own
  // aliasee, plain constants are walked directly.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
982
// Verify a GlobalAlias: its linkage, a non-null aliasee whose type matches
// the alias, and (via visitAliaseeSubExpr) the aliasee expression tree.
// NOTE(review): the opening line of the first Check (linkage validation) is
// not visible in this excerpt.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  // An alias is address-equivalent to its aliasee, so the types must agree.
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression for cycles / interposable targets.
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally apply the checks common to all global values.
  visitGlobalValue(GA);
}
1000
// Verify a GlobalIFunc: its linkage, and that its resolver is a defined
// Function whose type is a pointer in the ifunc's address space.
// NOTE(review): the opening lines of two Check calls are not visible in this
// excerpt (linkage validation, and the pointer-return check on the resolver).
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver operand itself must be a pointer in GI's address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1023
1024void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1025 // There used to be various other llvm.dbg.* nodes, but we don't support
1026 // upgrading them and we want to reserve the namespace for future uses.
1027 if (NMD.getName().starts_with("llvm.dbg."))
1028 CheckDI(NMD.getName() == "llvm.dbg.cu",
1029 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1030 for (const MDNode *MD : NMD.operands()) {
1031 if (NMD.getName() == "llvm.dbg.cu")
1032 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1033
1034 if (!MD)
1035 continue;
1036
1037 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1038 }
1039}
1040
1041void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1042 // Only visit each node once. Metadata can be mutually recursive, so this
1043 // avoids infinite recursion here, as well as being an optimization.
1044 if (!MDNodes.insert(&MD).second)
1045 return;
1046
1047 Check(&MD.getContext() == &Context,
1048 "MDNode context does not match Module context!", &MD);
1049
1050 switch (MD.getMetadataID()) {
1051 default:
1052 llvm_unreachable("Invalid MDNode subclass");
1053 case Metadata::MDTupleKind:
1054 break;
1055#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1056 case Metadata::CLASS##Kind: \
1057 visit##CLASS(cast<CLASS>(MD)); \
1058 break;
1059#include "llvm/IR/Metadata.def"
1060 }
1061
1062 for (const Metadata *Op : MD.operands()) {
1063 if (!Op)
1064 continue;
1065 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1066 &MD, Op);
1067 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1068 "DILocation not allowed within this metadata node", &MD, Op);
1069 if (auto *N = dyn_cast<MDNode>(Op)) {
1070 visitMDNode(*N, AllowLocs);
1071 continue;
1072 }
1073 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1074 visitValueAsMetadata(*V, nullptr);
1075 continue;
1076 }
1077 }
1078
1079 // Check these last, so we diagnose problems in operands first.
1080 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1081 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1082}
1083
1084void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1085 Check(MD.getValue(), "Expected valid value", &MD);
1086 Check(!MD.getValue()->getType()->isMetadataTy(),
1087 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1088
1089 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1090 if (!L)
1091 return;
1092
1093 Check(F, "function-local metadata used outside a function", L);
1094
1095 // If this was an instruction, bb, or argument, verify that it is in the
1096 // function that we expect.
1097 Function *ActualF = nullptr;
1098 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1099 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1100 ActualF = I->getParent()->getParent();
1101 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1102 ActualF = BB->getParent();
1103 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1104 ActualF = A->getParent();
1105 assert(ActualF && "Unimplemented function local metadata case!");
1106
1107 Check(ActualF == F, "function-local metadata used in wrong function", L);
1108}
1109
1110void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1111 for (const ValueAsMetadata *VAM : AL.getArgs())
1112 visitValueAsMetadata(*VAM, F);
1113}
1114
1115void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1116 Metadata *MD = MDV.getMetadata();
1117 if (auto *N = dyn_cast<MDNode>(MD)) {
1118 visitMDNode(*N, AreDebugLocsAllowed::No);
1119 return;
1120 }
1121
1122 // Only visit each node once. Metadata can be mutually recursive, so this
1123 // avoids infinite recursion here, as well as being an optimization.
1124 if (!MDNodes.insert(MD).second)
1125 return;
1126
1127 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1128 visitValueAsMetadata(*V, F);
1129
1130 if (auto *AL = dyn_cast<DIArgList>(MD))
1131 visitDIArgList(*AL, F);
1132}
1133
// Predicates for optional debug-info references: many DI fields may be
// absent, so a null operand is treated as trivially valid.
static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1137
1138void Verifier::visitDILocation(const DILocation &N) {
1139 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1140 "location requires a valid scope", &N, N.getRawScope());
1141 if (auto *IA = N.getRawInlinedAt())
1142 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1143 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1144 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1145}
1146
// A generic DI node only needs a nonzero tag.
void Verifier::visitGenericDINode(const GenericDINode &N) {
  CheckDI(N.getTag(), "invalid tag", &N);
}
1150
1151void Verifier::visitDIScope(const DIScope &N) {
1152 if (auto *F = N.getRawFile())
1153 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1154}
1155
// Verify a DISubrangeType: tag, optional base type, and the optional
// lower/upper bound, stride and bias fields, each of which may be a signed
// constant, a DIVariable, or a DIExpression.
// NOTE(review): the opening line of the final SizeInBits CheckDI is not
// visible in this excerpt.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1183
// Verify a DISubrange: count and upperBound are mutually exclusive, and the
// count/bound/stride fields must each be a signed constant, a DIVariable,
// or a DIExpression.
// NOTE(review): the opening line of the "invalid subrange count" CheckDI is
// not visible in this excerpt.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // A constant count of -1 denotes an empty array; anything below is invalid.
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1211
1212void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1213 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1214 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1215 "GenericSubrange can have any one of count or upperBound", &N);
1216 auto *CBound = N.getRawCountNode();
1217 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1218 "Count must be signed constant or DIVariable or DIExpression", &N);
1219 auto *LBound = N.getRawLowerBound();
1220 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1221 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1222 "LowerBound must be signed constant or DIVariable or DIExpression",
1223 &N);
1224 auto *UBound = N.getRawUpperBound();
1225 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1226 "UpperBound must be signed constant or DIVariable or DIExpression",
1227 &N);
1228 auto *Stride = N.getRawStride();
1229 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1230 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1231 "Stride must be signed constant or DIVariable or DIExpression", &N);
1232}
1233
// An enumerator only needs the DW_TAG_enumerator tag.
void Verifier::visitDIEnumerator(const DIEnumerator &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
}
1237
// Verify a DIBasicType: one of the accepted base-type tags and a constant
// size.
// NOTE(review): the opening line of the SizeInBits CheckDI is not visible in
// this excerpt.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1248
// Verify a DIFixedPointType: base-type checks plus fixed-point-specific
// constraints on encoding, kind, factor, and numerator/denominator.
// NOTE(review): the opening lines of the kind/factor/rational CheckDI calls
// are not visible in this excerpt; the orphan fragments below are their
// continuations.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1267
1268void Verifier::visitDIStringType(const DIStringType &N) {
1269 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1270 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1271 &N);
1272}
1273
// Verify a DIDerivedType: the tag must be one of the qualifier / member /
// pointer-like tags, the scope and base type must be valid references, and
// tag-specific constraints (ptr-to-member extra data, set base types, DWARF
// address spaces) must hold.
// NOTE(review): several lines are not visible in this excerpt — the
// Enum/Subrange/Basic dyn_casts feeding the set-type check, and the opening
// of the final SizeInBits CheckDI.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    // For ptr-to-member, extraData holds the containing class type.
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      // A set's base type must be an enumeration, a subrange, or an
      // integral/boolean basic type.
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1335
1336/// Detect mutually exclusive flags.
1337static bool hasConflictingReferenceFlags(unsigned Flags) {
1338 return ((Flags & DINode::FlagLValueReference) &&
1339 (Flags & DINode::FlagRValueReference)) ||
1340 ((Flags & DINode::FlagTypePassByValue) &&
1341 (Flags & DINode::FlagTypePassByReference));
1342}
1343
1344void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1345 auto *Params = dyn_cast<MDTuple>(&RawParams);
1346 CheckDI(Params, "invalid template params", &N, &RawParams);
1347 for (Metadata *Op : Params->operands()) {
1348 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1349 &N, Params, Op);
1350 }
1351}
1352
// Verify a DICompositeType: tag, scope/base-type references, the elements
// tuple, vtable holder, flags, template params, and the array-only fields
// (dataLocation/associated/allocated/rank).
// NOTE(review): the opening lines of the reference-flags CheckDI and of the
// final SizeInBits CheckDI are not visible in this excerpt.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Legacy Objective-C blocks flag; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): this message names DISubprogram but fires for composite
  // types — looks like a copy-paste; confirm before changing (tests may
  // match the exact string).
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1427
// Verify a DISubroutineType: tag, a type array whose entries are all types
// (or null for varargs/void), and non-conflicting reference flags.
// NOTE(review): the opening line of the reference-flags CheckDI is not
// visible in this excerpt.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1439
1440void Verifier::visitDIFile(const DIFile &N) {
1441 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1442 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1443 if (Checksum) {
1444 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1445 "invalid checksum kind", &N);
1446 size_t Size;
1447 switch (Checksum->Kind) {
1448 case DIFile::CSK_MD5:
1449 Size = 32;
1450 break;
1451 case DIFile::CSK_SHA1:
1452 Size = 40;
1453 break;
1454 case DIFile::CSK_SHA256:
1455 Size = 64;
1456 break;
1457 }
1458 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1459 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1460 "invalid checksum", &N);
1461 }
1462}
1463
// Verify a DICompileUnit: distinct, correct tag, a named DIFile, a known
// emission kind, and well-formed enum/retained-type/global/imported-entity/
// macro lists. Records the unit in CUVisited for later cross-checks.
// NOTE(review): two lines are not visible in this excerpt — the dyn_cast
// producing `Enum` in the enum-list loop, and the opening of the
// global-variable-ref CheckDI.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained subprograms must be declarations, not definitions.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1517
// Verify a DISubprogram: tag, scope/file/type references, template params,
// retained nodes, reference flags, thrown types, and the definition vs
// declaration invariants (definitions are distinct and carry a compile unit;
// declarations carry neither).
// NOTE(review): the opening lines of the retained-nodes CheckDI and the
// reference-flags CheckDI are not visible in this excerpt.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1582
1583void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1584 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1585 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1586 "invalid local scope", &N, N.getRawScope());
1587 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1588 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1589}
1590
1591void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1592 visitDILexicalBlockBase(N);
1593
1594 CheckDI(N.getLine() || !N.getColumn(),
1595 "cannot have column info without line info", &N);
1596}
1597
// A DILexicalBlockFile has no constraints beyond the shared base checks.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1601
1602void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1603 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1604 if (auto *S = N.getRawScope())
1605 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1606 if (auto *S = N.getRawDecl())
1607 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1608}
1609
1610void Verifier::visitDINamespace(const DINamespace &N) {
1611 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1612 if (auto *S = N.getRawScope())
1613 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1614}
1615
1616void Verifier::visitDIMacro(const DIMacro &N) {
1617 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1618 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1619 "invalid macinfo type", &N);
1620 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1621 if (!N.getValue().empty()) {
1622 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1623 }
1624}
1625
1626void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1627 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1628 "invalid macinfo type", &N);
1629 if (auto *F = N.getRawFile())
1630 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1631
1632 if (auto *Array = N.getRawElements()) {
1633 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1634 for (Metadata *Op : N.getElements()->operands()) {
1635 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1636 }
1637 }
1638}
1639
// Verify a DIModule: correct tag and a non-empty name.
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
1644
// Common check for template parameters: the type reference, if present,
// must be a DIType.
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1648
// Verify a template type parameter: shared parameter checks plus the
// type-parameter tag.
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
          &N);
}
1655
// Verify a template value parameter: shared parameter checks plus one of the
// value/template-template/parameter-pack tags.
void Verifier::visitDITemplateValueParameter(
    const DITemplateValueParameter &N) {
  visitDITemplateParameter(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
              N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
              N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
          "invalid tag", &N);
}
1665
1666void Verifier::visitDIVariable(const DIVariable &N) {
1667 if (auto *S = N.getRawScope())
1668 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1669 if (auto *F = N.getRawFile())
1670 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1671}
1672
// Verify a DIGlobalVariable: shared variable checks, correct tag, a valid
// type reference (mandatory for definitions), and a well-formed static data
// member declaration when present.
// NOTE(review): the opening line of the static-data-member CheckDI is not
// visible in this excerpt.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
          "invalid static data member declaration", &N, Member);
  }
}
1687
1688void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1689 // Checks common to all variables.
1690 visitDIVariable(N);
1691
1692 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1693 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1694 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1695 "local variable requires a valid scope", &N, N.getRawScope());
1696 if (auto Ty = N.getType())
1697 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1698}
1699
1700void Verifier::visitDIAssignID(const DIAssignID &N) {
1701 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1702 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1703}
1704
1705void Verifier::visitDILabel(const DILabel &N) {
1706 if (auto *S = N.getRawScope())
1707 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1708 if (auto *F = N.getRawFile())
1709 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1710
1711 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1712 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1713 "label requires a valid scope", &N, N.getRawScope());
1714}
1715
// A DIExpression validates its own opcode stream; just ask it.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1719
1720void Verifier::visitDIGlobalVariableExpression(
1721 const DIGlobalVariableExpression &GVE) {
1722 CheckDI(GVE.getVariable(), "missing variable");
1723 if (auto *Var = GVE.getVariable())
1724 visitDIGlobalVariable(*Var);
1725 if (auto *Expr = GVE.getExpression()) {
1726 visitDIExpression(*Expr);
1727 if (auto Fragment = Expr->getFragmentInfo())
1728 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1729 }
1730}
1731
1732void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1733 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1734 if (auto *T = N.getRawType())
1735 CheckDI(isType(T), "invalid type ref", &N, T);
1736 if (auto *F = N.getRawFile())
1737 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1738}
1739
1740void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1741 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1742 N.getTag() == dwarf::DW_TAG_imported_declaration,
1743 "invalid tag", &N);
1744 if (auto *S = N.getRawScope())
1745 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1746 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1747 N.getRawEntity());
1748}
1749
1750void Verifier::visitComdat(const Comdat &C) {
1751 // In COFF the Module is invalid if the GlobalValue has private linkage.
1752 // Entities with private linkage don't have entries in the symbol table.
1753 if (TT.isOSBinFormatCOFF())
1754 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1755 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1756 GV);
1757}
1758
1759void Verifier::visitModuleIdents() {
1760 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1761 if (!Idents)
1762 return;
1763
1764 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1765 // Scan each llvm.ident entry and make sure that this requirement is met.
1766 for (const MDNode *N : Idents->operands()) {
1767 Check(N->getNumOperands() == 1,
1768 "incorrect number of operands in llvm.ident metadata", N);
1769 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1770 ("invalid value for llvm.ident metadata entry operand"
1771 "(the operand should be a string)"),
1772 N->getOperand(0));
1773 }
1774}
1775
1776void Verifier::visitModuleCommandLines() {
1777 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1778 if (!CommandLines)
1779 return;
1780
1781 // llvm.commandline takes a list of metadata entry. Each entry has only one
1782 // string. Scan each llvm.commandline entry and make sure that this
1783 // requirement is met.
1784 for (const MDNode *N : CommandLines->operands()) {
1785 Check(N->getNumOperands() == 1,
1786 "incorrect number of operands in llvm.commandline metadata", N);
1787 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1788 ("invalid value for llvm.commandline metadata entry operand"
1789 "(the operand should be a string)"),
1790 N->getOperand(0));
1791 }
1792}
1793
/// Verify the module-level flags metadata (!llvm.module.flags): delegates
/// per-flag checks to visitModuleFlag, enforces that the two AArch64 PAuth
/// ABI flags appear together or not at all, and then validates every
/// 'require' relationship collected during the scan.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  // Sentinel uint64_t(-1) means "flag not seen yet".
  uint64_t PAuthABIPlatform = -1;
  uint64_t PAuthABIVersion = -1;
  for (const MDNode *MDN : Flags->operands()) {
    visitModuleFlag(MDN, SeenIDs, Requirements);
    if (MDN->getNumOperands() != 3)
      continue;
    if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
      if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
        // NOTE(review): source listing appears truncated here — the
        // initializer extracting the ConstantInt flag value is missing;
        // verify against upstream Verifier.cpp.
        if (const auto *PAP =
          PAuthABIPlatform = PAP->getZExtValue();
      } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
        // NOTE(review): truncated as above — ConstantInt extraction missing.
        if (const auto *PAV =
          PAuthABIVersion = PAV->getZExtValue();
      }
    }
  }

  // Either both PAuth ABI flags are present, or neither is.
  if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
    CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
                "'aarch64-elf-pauthabi-version' module flags must be present");

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    // The required flag must exist in this module...
    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    // ...and carry exactly the required value.
    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}
1844
/// Verify a single module flag entry: operand arity, behavior constant, flag
/// ID string, behavior-specific value constraints, ID uniqueness, and a
/// handful of well-known flag names with extra requirements.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    // NOTE(review): source listing appears truncated — the Check(...) head
    // extracting the behavior ConstantInt is missing its first line; verify
    // against upstream Verifier.cpp.
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    // 'min' merging only makes sense for non-negative constant integers.
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
    // NOTE(review): truncated — the Check(...) head extracting the
    // ConstantInt value is missing; verify against upstream.
        "invalid value for 'max' module flag (expected constant integer)",
        Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  if (ID->getString() == "wchar_size") {
    // NOTE(review): truncated — the initializer extracting the ConstantInt
    // value is missing; verify against upstream.
    ConstantInt *Value
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    // NOTE(review): truncated — the mdconst::dyn_extract_or_null initializer
    // is missing; verify against upstream.
    ConstantInt *Value =
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    // Each operand of the value node is an individual call-graph profile
    // entry; validate them one by one.
    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
1952
1953void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1954 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1955 if (!FuncMDO)
1956 return;
1957 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1958 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1959 "expected a Function or null", FuncMDO);
1960 };
1961 auto Node = dyn_cast_or_null<MDNode>(MDO);
1962 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1963 CheckFunction(Node->getOperand(0));
1964 CheckFunction(Node->getOperand(1));
1965 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1966 Check(Count && Count->getType()->isIntegerTy(),
1967 "expected an integer constant", Node->getOperand(2));
1968}
1969
/// Check each attribute in \p Attrs in isolation: known string-boolean
/// attributes must carry "", "true" or "false", and enum attributes must
/// agree with their kind's argument requirement. \p V is only used for error
/// reporting.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// Expand one validity check per known string-boolean attribute from the
// generated Attributes.inc table; non-boolean string attributes fall
// through unchecked.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // An int attribute kind must actually carry an argument, and vice versa.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
1995
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Enforces: per-attribute validity, mutual-exclusion groups (byval/inalloca/
// preallocated/sret/nest/byref, read/write pairs, zext/sext, ...), type
// compatibility, and pointer-specific size/alignment limits.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  // Every enum attribute present must be usable on a parameter at all.
  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' tolerates only a companion 'range' attribute.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject any enum attribute that cannot be applied to a value of type Ty.
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                      "' applied to incompatible type!", V);
      return;
    }
  }

  if (isa<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
      // NOTE(review): source listing appears truncated — the Check(...) head
      // for the target-extension-type test is missing; verify against
      // upstream Verifier.cpp.
            "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  if (Attrs.hasAttribute(Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
    // NOTE(review): truncated — the Check(...) head validating range
    // ordering is missing; verify against upstream.
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  if (Attrs.hasAttribute(Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  if (Attrs.hasAttribute(Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
    // NOTE(review): truncated — the Check(...) head comparing the range's
    // bit width against the type's bit width is missing; verify against
    // upstream.
        "Range bit width must match type bit width!", V);
  }
}
2162
2163void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2164 const Value *V) {
2165 if (Attrs.hasFnAttr(Attr)) {
2166 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2167 unsigned N;
2168 if (S.getAsInteger(10, N))
2169 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2170 }
2171}
2172
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// Verifies the whole attribute list of a function/call: context ownership,
// return-value attributes, per-parameter attributes (including uniqueness of
// nest/returned/sret/swift* and placement of inalloca), and then the
// function-level attributes with their many pairwise-compatibility and
// value-format rules.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Only validate context ownership once per distinct attribute list.
  if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!", &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!", &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!", &A, V);
      }
    }
  }

  // Track attributes that may appear on at most one parameter.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values",
          V);

  // Track the widest fixed-vector parameter/return width seen.
  unsigned MaxParameterWidth = 0;
  auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
    if (Ty->isVectorTy()) {
      if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
        unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
        if (Size > MaxParameterWidth)
          MaxParameterWidth = Size;
      }
    }
  };
  GetMaxParameterWidth(FT->getReturnType());
  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(i);

    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics", V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm.",
              V);
    }

    verifyParameterAttrs(ArgAttrs, Ty, V);
    GetMaxParameterWidth(Ty);

    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!", V);
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute",
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!", V);
    }
  }

  if (!Attrs.hasFnAttrs())
    return;

  // Function-level attributes.
  verifyAttributeTypes(Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!",
          V);

  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
          "Attributes 'optdebug and optnone' are incompatible!", V);
  }

  Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
          Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
        "Attributes "
        "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
        V);

  if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optdebug' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optdebug' are incompatible!", V);
  }

  Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
            isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
        "Attribute writable and memory without argmem: write are incompatible!",
        V);

  if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
    Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
          "Attributes 'aarch64_pstate_sm_enabled and "
          "aarch64_pstate_sm_compatible' are incompatible!",
          V);
  }

  // At most one ZA-state attribute (booleans sum to <= 1).
  Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
         Attrs.hasFnAttr("aarch64_inout_za") +
         Attrs.hasFnAttr("aarch64_out_za") +
         Attrs.hasFnAttr("aarch64_preserves_za") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
        "'aarch64_inout_za', 'aarch64_preserves_za' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  // At most one ZT0-state attribute.
  Check((Attrs.hasFnAttr("aarch64_new_zt0") +
         Attrs.hasFnAttr("aarch64_in_zt0") +
         Attrs.hasFnAttr("aarch64_inout_zt0") +
         Attrs.hasFnAttr("aarch64_out_zt0") +
         Attrs.hasFnAttr("aarch64_preserves_zt0") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
        "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  if (Attrs.hasFnAttr(Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
    // NOTE(review): source listing appears truncated — the Check(...) head
    // testing the global's unnamed_addr property is missing; verify against
    // upstream Verifier.cpp.
          "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
    // Both allocsize indices must name integer parameters of FT.
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                        " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args->first))
      return;

    if (Args->second && !CheckParam("number of elements", *Args->second))
      return;
  }

  if (Attrs.hasFnAttr(Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
    // NOTE(review): truncated — the declaration head ("AllocFnKind Type =")
    // for the masked kind is missing; verify against upstream.
        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
    if (!is_contained(
            {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
            Type))
      CheckFailed(
          "'allockind()' requires exactly one of alloc, realloc, and free");
    if ((Type == AllocFnKind::Free) &&
        ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
               AllocFnKind::Aligned)) != AllocFnKind::Unknown))
      CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers.");
    AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed("'allockind()' can't be both zeroed and uninitialized");
  }

  if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
    StringRef S = A.getValueAsString();
    Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
    Function *Variant = M.getFunction(S);
    if (Variant) {
      Attribute Family = Attrs.getFnAttr("alloc-family");
      Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
      if (Family.isValid())
        Check(VariantFamily.isValid() &&
                  VariantFamily.getValueAsString() == Family.getValueAsString(),
              "'alloc-variant-zeroed' must name a function belonging to the "
              "same 'alloc-family'");

      Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
                (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
                 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
            "'alloc-variant-zeroed' must name a function with "
            "'allockind(\"zeroed\")'");

      Check(FT == Variant->getFunctionType(),
            "'alloc-variant-zeroed' must name a function with the same "
            "signature");
    }
  }

  if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
    // Both bounds must be powers of two and min must not exceed max.
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed("'vscale_range' minimum must be greater than 0", V);
    else if (!isPowerOf2_32(VScaleMin))
      CheckFailed("'vscale_range' minimum must be power-of-two value", V);
    std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
    else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
      CheckFailed("'vscale_range' maximum must be power-of-two value", V);
  }

  if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
    StringRef FP = FPAttr.getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
  }

  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
  if (Attrs.hasFnAttr("patchable-function-entry-section"))
    Check(!Attrs.getFnAttr("patchable-function-entry-section")
               .getValueAsString()
               .empty(),
          "\"patchable-function-entry-section\" must not be empty");
  checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);

  if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "none" && S != "all" && S != "non-leaf")
      CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "a_key" && S != "b_key")
      CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
                  V);
    // The key is meaningless without the base attribute.
    if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
      CheckFailed(
          "'sign-return-address-key' present without `sign-return-address`");
    }
  }

  if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-target-enforcement' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
                  V);
  }

  if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
    StringRef S = A.getValueAsString();
    const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
    if (!Info)
      CheckFailed("invalid name for a VFABI variant: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
    StringRef S = A.getValueAsString();
    // NOTE(review): truncated — the condition guarding this diagnostic
    // (parsing S as a denormal mode) is missing; verify against upstream.
    CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
    StringRef S = A.getValueAsString();
    // NOTE(review): truncated — the guarding condition is missing here as
    // well; verify against upstream.
    CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
                V);
  }
}
2518
/// Verify function-level metadata attachments: the shape of !prof
/// entry-count annotations and of !kcfi_type operands.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): source listing appears truncated — the condition
      // guarding this 'continue' is missing; verify against upstream
      // Verifier.cpp.
        continue;

      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
      // NOTE(review): truncated — the Check(isa<MDString>(...)) head is
      // missing.
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
      // NOTE(review): truncated — the Check(...) head comparing ProfName to
      // the two accepted names is missing.
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
      // NOTE(review): truncated — the Check(...) head requiring a constant
      // operand is missing.
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
      // NOTE(review): truncated — the Check(isa<ConstantAsMetadata>(...))
      // head is missing.
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
      // NOTE(review): truncated — the Check(...) head verifying the 32-bit
      // width is missing.
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2566
2567void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2568 if (!ConstantExprVisited.insert(EntryC).second)
2569 return;
2570
2572 Stack.push_back(EntryC);
2573
2574 while (!Stack.empty()) {
2575 const Constant *C = Stack.pop_back_val();
2576
2577 // Check this constant expression.
2578 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2579 visitConstantExpr(CE);
2580
2581 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2582 visitConstantPtrAuth(CPA);
2583
2584 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2585 // Global Values get visited separately, but we do need to make sure
2586 // that the global value is in the correct module
2587 Check(GV->getParent() == &M, "Referencing global in another module!",
2588 EntryC, &M, GV, GV->getParent());
2589 continue;
2590 }
2591
2592 // Visit all sub-expressions.
2593 for (const Use &U : C->operands()) {
2594 const auto *OpC = dyn_cast<Constant>(U);
2595 if (!OpC)
2596 continue;
2597 if (!ConstantExprVisited.insert(OpC).second)
2598 continue;
2599 Stack.push_back(OpC);
2600 }
2601 }
2602}
2603
2604void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2605 if (CE->getOpcode() == Instruction::BitCast)
2606 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2607 CE->getType()),
2608 "Invalid bitcast", CE);
2609 else if (CE->getOpcode() == Instruction::PtrToAddr)
2610 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2611}
2612
2613void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2614 Check(CPA->getPointer()->getType()->isPointerTy(),
2615 "signed ptrauth constant base pointer must have pointer type");
2616
2617 Check(CPA->getType() == CPA->getPointer()->getType(),
2618 "signed ptrauth constant must have same type as its base pointer");
2619
2620 Check(CPA->getKey()->getBitWidth() == 32,
2621 "signed ptrauth constant key must be i32 constant integer");
2622
2624 "signed ptrauth constant address discriminator must be a pointer");
2625
2626 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2627 "signed ptrauth constant discriminator must be i64 constant integer");
2628}
2629
2630bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2631 // There shouldn't be more attribute sets than there are parameters plus the
2632 // function and return value.
2633 return Attrs.getNumAttrSets() <= Params + 2;
2634}
2635
2636void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2637 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2638 unsigned ArgNo = 0;
2639 unsigned LabelNo = 0;
2640 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2641 if (CI.Type == InlineAsm::isLabel) {
2642 ++LabelNo;
2643 continue;
2644 }
2645
2646 // Only deal with constraints that correspond to call arguments.
2647 if (!CI.hasArg())
2648 continue;
2649
2650 if (CI.isIndirect) {
2651 const Value *Arg = Call.getArgOperand(ArgNo);
2652 Check(Arg->getType()->isPointerTy(),
2653 "Operand for indirect constraint must have pointer type", &Call);
2654
2656 "Operand for indirect constraint must have elementtype attribute",
2657 &Call);
2658 } else {
2659 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2660 "Elementtype attribute can only be applied for indirect "
2661 "constraints",
2662 &Call);
2663 }
2664
2665 ArgNo++;
2666 }
2667
2668 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2669 Check(LabelNo == CallBr->getNumIndirectDests(),
2670 "Number of label constraints does not match number of callbr dests",
2671 &Call);
2672 } else {
2673 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2674 &Call);
2675 }
2676}
2677
2678/// Verify that statepoint intrinsic is well formed.
2679void Verifier::verifyStatepoint(const CallBase &Call) {
2680 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2681
2684 "gc.statepoint must read and write all memory to preserve "
2685 "reordering restrictions required by safepoint semantics",
2686 Call);
2687
2688 const int64_t NumPatchBytes =
2689 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2690 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2691 Check(NumPatchBytes >= 0,
2692 "gc.statepoint number of patchable bytes must be "
2693 "positive",
2694 Call);
2695
2696 Type *TargetElemType = Call.getParamElementType(2);
2697 Check(TargetElemType,
2698 "gc.statepoint callee argument must have elementtype attribute", Call);
2699 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2700 Check(TargetFuncType,
2701 "gc.statepoint callee elementtype must be function type", Call);
2702
2703 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2704 Check(NumCallArgs >= 0,
2705 "gc.statepoint number of arguments to underlying call "
2706 "must be positive",
2707 Call);
2708 const int NumParams = (int)TargetFuncType->getNumParams();
2709 if (TargetFuncType->isVarArg()) {
2710 Check(NumCallArgs >= NumParams,
2711 "gc.statepoint mismatch in number of vararg call args", Call);
2712
2713 // TODO: Remove this limitation
2714 Check(TargetFuncType->getReturnType()->isVoidTy(),
2715 "gc.statepoint doesn't support wrapping non-void "
2716 "vararg functions yet",
2717 Call);
2718 } else
2719 Check(NumCallArgs == NumParams,
2720 "gc.statepoint mismatch in number of call args", Call);
2721
2722 const uint64_t Flags
2723 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2724 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2725 "unknown flag used in gc.statepoint flags argument", Call);
2726
2727 // Verify that the types of the call parameter arguments match
2728 // the type of the wrapped callee.
2729 AttributeList Attrs = Call.getAttributes();
2730 for (int i = 0; i < NumParams; i++) {
2731 Type *ParamType = TargetFuncType->getParamType(i);
2732 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2733 Check(ArgType == ParamType,
2734 "gc.statepoint call argument does not match wrapped "
2735 "function type",
2736 Call);
2737
2738 if (TargetFuncType->isVarArg()) {
2739 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2740 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2741 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2742 }
2743 }
2744
2745 const int EndCallArgsInx = 4 + NumCallArgs;
2746
2747 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2748 Check(isa<ConstantInt>(NumTransitionArgsV),
2749 "gc.statepoint number of transition arguments "
2750 "must be constant integer",
2751 Call);
2752 const int NumTransitionArgs =
2753 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2754 Check(NumTransitionArgs == 0,
2755 "gc.statepoint w/inline transition bundle is deprecated", Call);
2756 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2757
2758 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2759 Check(isa<ConstantInt>(NumDeoptArgsV),
2760 "gc.statepoint number of deoptimization arguments "
2761 "must be constant integer",
2762 Call);
2763 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2764 Check(NumDeoptArgs == 0,
2765 "gc.statepoint w/inline deopt operands is deprecated", Call);
2766
2767 const int ExpectedNumArgs = 7 + NumCallArgs;
2768 Check(ExpectedNumArgs == (int)Call.arg_size(),
2769 "gc.statepoint too many arguments", Call);
2770
2771 // Check that the only uses of this gc.statepoint are gc.result or
2772 // gc.relocate calls which are tied to this statepoint and thus part
2773 // of the same statepoint sequence
2774 for (const User *U : Call.users()) {
2775 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2776 Check(UserCall, "illegal use of statepoint token", Call, U);
2777 if (!UserCall)
2778 continue;
2779 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2780 "gc.result or gc.relocate are the only value uses "
2781 "of a gc.statepoint",
2782 Call, U);
2783 if (isa<GCResultInst>(UserCall)) {
2784 Check(UserCall->getArgOperand(0) == &Call,
2785 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2786 } else if (isa<GCRelocateInst>(Call)) {
2787 Check(UserCall->getArgOperand(0) == &Call,
2788 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2789 }
2790 }
2791
2792 // Note: It is legal for a single derived pointer to be listed multiple
2793 // times. It's non-optimal, but it is legal. It can also happen after
2794 // insertion if we strip a bitcast away.
2795 // Note: It is really tempting to check that each base is relocated and
2796 // that a derived pointer is never reused as a base pointer. This turns
2797 // out to be problematic since optimizations run after safepoint insertion
2798 // can recognize equality properties that the insertion logic doesn't know
2799 // about. See example statepoint.ll in the verifier subdirectory
2800}
2801
2802void Verifier::verifyFrameRecoverIndices() {
2803 for (auto &Counts : FrameEscapeInfo) {
2804 Function *F = Counts.first;
2805 unsigned EscapedObjectCount = Counts.second.first;
2806 unsigned MaxRecoveredIndex = Counts.second.second;
2807 Check(MaxRecoveredIndex <= EscapedObjectCount,
2808 "all indices passed to llvm.localrecover must be less than the "
2809 "number of arguments passed to llvm.localescape in the parent "
2810 "function",
2811 F);
2812 }
2813}
2814
2815static Instruction *getSuccPad(Instruction *Terminator) {
2816 BasicBlock *UnwindDest;
2817 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2818 UnwindDest = II->getUnwindDest();
2819 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2820 UnwindDest = CSI->getUnwindDest();
2821 else
2822 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2823 return &*UnwindDest->getFirstNonPHIIt();
2824}
2825
// Verify that sibling EH funclets do not unwind to one another in a cycle.
// Walks the "pad -> terminator -> successor pad" chains recorded in
// SiblingFuncletInfo; any pad reached twice within a single walk means two
// funclets each claim to handle the other's exceptions, which is invalid.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose unwind chains were fully checked by any prior walk.
  // Active: pads on the walk currently in progress (used for cycle
  // detection); cleared before starting the next walk.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-walk the cycle starting at SuccPad to collect every node on it
        // so the diagnostic can list them all.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          // List the terminator separately only when it differs from the
          // pad itself (presumably a pad that is also its own terminator,
          // e.g. a catchswitch — confirm against SiblingFuncletInfo's
          // construction).
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2868
2869// visitFunction - Verify that a function is ok.
2870//
2871void Verifier::visitFunction(const Function &F) {
2872 visitGlobalValue(F);
2873
2874 // Check function arguments.
2875 FunctionType *FT = F.getFunctionType();
2876 unsigned NumArgs = F.arg_size();
2877
2878 Check(&Context == &F.getContext(),
2879 "Function context does not match Module context!", &F);
2880
2881 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2882 Check(FT->getNumParams() == NumArgs,
2883 "# formal arguments must match # of arguments for function type!", &F,
2884 FT);
2885 Check(F.getReturnType()->isFirstClassType() ||
2886 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2887 "Functions cannot return aggregate values!", &F);
2888
2889 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2890 "Invalid struct return type!", &F);
2891
2892 if (MaybeAlign A = F.getAlign()) {
2893 Check(A->value() <= Value::MaximumAlignment,
2894 "huge alignment values are unsupported", &F);
2895 }
2896
2897 AttributeList Attrs = F.getAttributes();
2898
2899 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2900 "Attribute after last parameter!", &F);
2901
2902 bool IsIntrinsic = F.isIntrinsic();
2903
2904 // Check function attributes.
2905 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2906
2907 // On function declarations/definitions, we do not support the builtin
2908 // attribute. We do not check this in VerifyFunctionAttrs since that is
2909 // checking for Attributes that can/can not ever be on functions.
2910 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2911 "Attribute 'builtin' can only be applied to a callsite.", &F);
2912
2913 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2914 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2915
2916 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2917 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2918
2919 if (Attrs.hasFnAttr(Attribute::Naked))
2920 for (const Argument &Arg : F.args())
2921 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2922
2923 // Check that this function meets the restrictions on this calling convention.
2924 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2925 // restrictions can be lifted.
2926 switch (F.getCallingConv()) {
2927 default:
2928 case CallingConv::C:
2929 break;
2930 case CallingConv::X86_INTR: {
2931 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2932 "Calling convention parameter requires byval", &F);
2933 break;
2934 }
2935 case CallingConv::AMDGPU_KERNEL:
2936 case CallingConv::SPIR_KERNEL:
2937 case CallingConv::AMDGPU_CS_Chain:
2938 case CallingConv::AMDGPU_CS_ChainPreserve:
2939 Check(F.getReturnType()->isVoidTy(),
2940 "Calling convention requires void return type", &F);
2941 [[fallthrough]];
2942 case CallingConv::AMDGPU_VS:
2943 case CallingConv::AMDGPU_HS:
2944 case CallingConv::AMDGPU_GS:
2945 case CallingConv::AMDGPU_PS:
2946 case CallingConv::AMDGPU_CS:
2947 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2948 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2949 const unsigned StackAS = DL.getAllocaAddrSpace();
2950 unsigned i = 0;
2951 for (const Argument &Arg : F.args()) {
2952 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2953 "Calling convention disallows byval", &F);
2954 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2955 "Calling convention disallows preallocated", &F);
2956 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2957 "Calling convention disallows inalloca", &F);
2958
2959 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2960 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2961 // value here.
2962 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2963 "Calling convention disallows stack byref", &F);
2964 }
2965
2966 ++i;
2967 }
2968 }
2969
2970 [[fallthrough]];
2971 case CallingConv::Fast:
2972 case CallingConv::Cold:
2973 case CallingConv::Intel_OCL_BI:
2974 case CallingConv::PTX_Kernel:
2975 case CallingConv::PTX_Device:
2976 Check(!F.isVarArg(),
2977 "Calling convention does not support varargs or "
2978 "perfect forwarding!",
2979 &F);
2980 break;
2981 case CallingConv::AMDGPU_Gfx_WholeWave:
2982 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
2983 "Calling convention requires first argument to be i1", &F);
2984 Check(!F.arg_begin()->hasInRegAttr(),
2985 "Calling convention requires first argument to not be inreg", &F);
2986 Check(!F.isVarArg(),
2987 "Calling convention does not support varargs or "
2988 "perfect forwarding!",
2989 &F);
2990 break;
2991 }
2992
2993 // Check that the argument values match the function type for this function...
2994 unsigned i = 0;
2995 for (const Argument &Arg : F.args()) {
2996 Check(Arg.getType() == FT->getParamType(i),
2997 "Argument value does not match function argument type!", &Arg,
2998 FT->getParamType(i));
2999 Check(Arg.getType()->isFirstClassType(),
3000 "Function arguments must have first-class types!", &Arg);
3001 if (!IsIntrinsic) {
3002 Check(!Arg.getType()->isMetadataTy(),
3003 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3004 Check(!Arg.getType()->isTokenLikeTy(),
3005 "Function takes token but isn't an intrinsic", &Arg, &F);
3006 Check(!Arg.getType()->isX86_AMXTy(),
3007 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3008 }
3009
3010 // Check that swifterror argument is only used by loads and stores.
3011 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3012 verifySwiftErrorValue(&Arg);
3013 }
3014 ++i;
3015 }
3016
3017 if (!IsIntrinsic) {
3018 Check(!F.getReturnType()->isTokenLikeTy(),
3019 "Function returns a token but isn't an intrinsic", &F);
3020 Check(!F.getReturnType()->isX86_AMXTy(),
3021 "Function returns a x86_amx but isn't an intrinsic", &F);
3022 }
3023
3024 // Get the function metadata attachments.
3026 F.getAllMetadata(MDs);
3027 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3028 verifyFunctionMetadata(MDs);
3029
3030 // Check validity of the personality function
3031 if (F.hasPersonalityFn()) {
3032 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3033 if (Per)
3034 Check(Per->getParent() == F.getParent(),
3035 "Referencing personality function in another module!", &F,
3036 F.getParent(), Per, Per->getParent());
3037 }
3038
3039 // EH funclet coloring can be expensive, recompute on-demand
3040 BlockEHFuncletColors.clear();
3041
3042 if (F.isMaterializable()) {
3043 // Function has a body somewhere we can't see.
3044 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3045 MDs.empty() ? nullptr : MDs.front().second);
3046 } else if (F.isDeclaration()) {
3047 for (const auto &I : MDs) {
3048 // This is used for call site debug information.
3049 CheckDI(I.first != LLVMContext::MD_dbg ||
3050 !cast<DISubprogram>(I.second)->isDistinct(),
3051 "function declaration may only have a unique !dbg attachment",
3052 &F);
3053 Check(I.first != LLVMContext::MD_prof,
3054 "function declaration may not have a !prof attachment", &F);
3055
3056 // Verify the metadata itself.
3057 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3058 }
3059 Check(!F.hasPersonalityFn(),
3060 "Function declaration shouldn't have a personality routine", &F);
3061 } else {
3062 // Verify that this function (which has a body) is not named "llvm.*". It
3063 // is not legal to define intrinsics.
3064 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3065
3066 // Check the entry node
3067 const BasicBlock *Entry = &F.getEntryBlock();
3068 Check(pred_empty(Entry),
3069 "Entry block to function must not have predecessors!", Entry);
3070
3071 // The address of the entry block cannot be taken, unless it is dead.
3072 if (Entry->hasAddressTaken()) {
3073 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3074 "blockaddress may not be used with the entry block!", Entry);
3075 }
3076
3077 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3078 NumKCFIAttachments = 0;
3079 // Visit metadata attachments.
3080 for (const auto &I : MDs) {
3081 // Verify that the attachment is legal.
3082 auto AllowLocs = AreDebugLocsAllowed::No;
3083 switch (I.first) {
3084 default:
3085 break;
3086 case LLVMContext::MD_dbg: {
3087 ++NumDebugAttachments;
3088 CheckDI(NumDebugAttachments == 1,
3089 "function must have a single !dbg attachment", &F, I.second);
3090 CheckDI(isa<DISubprogram>(I.second),
3091 "function !dbg attachment must be a subprogram", &F, I.second);
3092 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3093 "function definition may only have a distinct !dbg attachment",
3094 &F);
3095
3096 auto *SP = cast<DISubprogram>(I.second);
3097 const Function *&AttachedTo = DISubprogramAttachments[SP];
3098 CheckDI(!AttachedTo || AttachedTo == &F,
3099 "DISubprogram attached to more than one function", SP, &F);
3100 AttachedTo = &F;
3101 AllowLocs = AreDebugLocsAllowed::Yes;
3102 break;
3103 }
3104 case LLVMContext::MD_prof:
3105 ++NumProfAttachments;
3106 Check(NumProfAttachments == 1,
3107 "function must have a single !prof attachment", &F, I.second);
3108 break;
3109 case LLVMContext::MD_kcfi_type:
3110 ++NumKCFIAttachments;
3111 Check(NumKCFIAttachments == 1,
3112 "function must have a single !kcfi_type attachment", &F,
3113 I.second);
3114 break;
3115 }
3116
3117 // Verify the metadata itself.
3118 visitMDNode(*I.second, AllowLocs);
3119 }
3120 }
3121
3122 // If this function is actually an intrinsic, verify that it is only used in
3123 // direct call/invokes, never having its "address taken".
3124 // Only do this if the module is materialized, otherwise we don't have all the
3125 // uses.
3126 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3127 const User *U;
3128 if (F.hasAddressTaken(&U, false, true, false,
3129 /*IgnoreARCAttachedCall=*/true))
3130 Check(false, "Invalid user of intrinsic instruction!", U);
3131 }
3132
3133 // Check intrinsics' signatures.
3134 switch (F.getIntrinsicID()) {
3135 case Intrinsic::experimental_gc_get_pointer_base: {
3136 FunctionType *FT = F.getFunctionType();
3137 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3138 Check(isa<PointerType>(F.getReturnType()),
3139 "gc.get.pointer.base must return a pointer", F);
3140 Check(FT->getParamType(0) == F.getReturnType(),
3141 "gc.get.pointer.base operand and result must be of the same type", F);
3142 break;
3143 }
3144 case Intrinsic::experimental_gc_get_pointer_offset: {
3145 FunctionType *FT = F.getFunctionType();
3146 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3147 Check(isa<PointerType>(FT->getParamType(0)),
3148 "gc.get.pointer.offset operand must be a pointer", F);
3149 Check(F.getReturnType()->isIntegerTy(),
3150 "gc.get.pointer.offset must return integer", F);
3151 break;
3152 }
3153 }
3154
3155 auto *N = F.getSubprogram();
3156 HasDebugInfo = (N != nullptr);
3157 if (!HasDebugInfo)
3158 return;
3159
3160 // Check that all !dbg attachments lead to back to N.
3161 //
3162 // FIXME: Check this incrementally while visiting !dbg attachments.
3163 // FIXME: Only check when N is the canonical subprogram for F.
3164 SmallPtrSet<const MDNode *, 32> Seen;
3165 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3166 // Be careful about using DILocation here since we might be dealing with
3167 // broken code (this is the Verifier after all).
3168 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3169 if (!DL)
3170 return;
3171 if (!Seen.insert(DL).second)
3172 return;
3173
3174 Metadata *Parent = DL->getRawScope();
3175 CheckDI(Parent && isa<DILocalScope>(Parent),
3176 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3177
3178 DILocalScope *Scope = DL->getInlinedAtScope();
3179 Check(Scope, "Failed to find DILocalScope", DL);
3180
3181 if (!Seen.insert(Scope).second)
3182 return;
3183
3184 DISubprogram *SP = Scope->getSubprogram();
3185
3186 // Scope and SP could be the same MDNode and we don't want to skip
3187 // validation in that case
3188 if ((Scope != SP) && !Seen.insert(SP).second)
3189 return;
3190
3191 CheckDI(SP->describes(&F),
3192 "!dbg attachment points at wrong subprogram for function", N, &F,
3193 &I, DL, Scope, SP);
3194 };
3195 for (auto &BB : F)
3196 for (auto &I : BB) {
3197 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3198 // The llvm.loop annotations also contain two DILocations.
3199 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3200 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3201 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3202 if (BrokenDebugInfo)
3203 return;
3204 }
3205}
3206
3207// verifyBasicBlock - Verify that a basic block is well formed...
3208//
3209void Verifier::visitBasicBlock(BasicBlock &BB) {
3210 InstsInThisBlock.clear();
3211 ConvergenceVerifyHelper.visit(BB);
3212
3213 // Ensure that basic blocks have terminators!
3214 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3215
3216 // Check constraints that this basic block imposes on all of the PHI nodes in
3217 // it.
3218 if (isa<PHINode>(BB.front())) {
3219 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3221 llvm::sort(Preds);
3222 for (const PHINode &PN : BB.phis()) {
3223 Check(PN.getNumIncomingValues() == Preds.size(),
3224 "PHINode should have one entry for each predecessor of its "
3225 "parent basic block!",
3226 &PN);
3227
3228 // Get and sort all incoming values in the PHI node...
3229 Values.clear();
3230 Values.reserve(PN.getNumIncomingValues());
3231 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3232 Values.push_back(
3233 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3234 llvm::sort(Values);
3235
3236 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3237 // Check to make sure that if there is more than one entry for a
3238 // particular basic block in this PHI node, that the incoming values are
3239 // all identical.
3240 //
3241 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3242 Values[i].second == Values[i - 1].second,
3243 "PHI node has multiple entries for the same basic block with "
3244 "different incoming values!",
3245 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3246
3247 // Check to make sure that the predecessors and PHI node entries are
3248 // matched up.
3249 Check(Values[i].first == Preds[i],
3250 "PHI node entries do not match predecessors!", &PN,
3251 Values[i].first, Preds[i]);
3252 }
3253 }
3254 }
3255
3256 // Check that all instructions have their parent pointers set up correctly.
3257 for (auto &I : BB)
3258 {
3259 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3260 }
3261
3262 // Confirm that no issues arise from the debug program.
3263 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3264 &BB);
3265}
3266
3267void Verifier::visitTerminator(Instruction &I) {
3268 // Ensure that terminators only exist at the end of the basic block.
3269 Check(&I == I.getParent()->getTerminator(),
3270 "Terminator found in the middle of a basic block!", I.getParent());
3271 visitInstruction(I);
3272}
3273
3274void Verifier::visitBranchInst(BranchInst &BI) {
3275 if (BI.isConditional()) {
3277 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3278 }
3279 visitTerminator(BI);
3280}
3281
3282void Verifier::visitReturnInst(ReturnInst &RI) {
3283 Function *F = RI.getParent()->getParent();
3284 unsigned N = RI.getNumOperands();
3285 if (F->getReturnType()->isVoidTy())
3286 Check(N == 0,
3287 "Found return instr that returns non-void in Function of void "
3288 "return type!",
3289 &RI, F->getReturnType());
3290 else
3291 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3292 "Function return type does not match operand "
3293 "type of return inst!",
3294 &RI, F->getReturnType());
3295
3296 // Check to make sure that the return value has necessary properties for
3297 // terminators...
3298 visitTerminator(RI);
3299}
3300
3301void Verifier::visitSwitchInst(SwitchInst &SI) {
3302 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3303 // Check to make sure that all of the constants in the switch instruction
3304 // have the same type as the switched-on value.
3305 Type *SwitchTy = SI.getCondition()->getType();
3306 SmallPtrSet<ConstantInt*, 32> Constants;
3307 for (auto &Case : SI.cases()) {
3308 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3309 "Case value is not a constant integer.", &SI);
3310 Check(Case.getCaseValue()->getType() == SwitchTy,
3311 "Switch constants must all be same type as switch value!", &SI);
3312 Check(Constants.insert(Case.getCaseValue()).second,
3313 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3314 }
3315
3316 visitTerminator(SI);
3317}
3318
3319void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3321 "Indirectbr operand must have pointer type!", &BI);
3322 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3324 "Indirectbr destinations must all have pointer type!", &BI);
3325
3326 visitTerminator(BI);
3327}
3328
3329void Verifier::visitCallBrInst(CallBrInst &CBI) {
3330 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3331 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3332 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3333
3334 verifyInlineAsmCall(CBI);
3335 visitTerminator(CBI);
3336}
3337
3338void Verifier::visitSelectInst(SelectInst &SI) {
3339 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3340 SI.getOperand(2)),
3341 "Invalid operands for select instruction!", &SI);
3342
3343 Check(SI.getTrueValue()->getType() == SI.getType(),
3344 "Select values must have same type as select instruction!", &SI);
3345 visitInstruction(SI);
3346}
3347
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // Any UserOp reaching the verifier means a pass created one for internal
  // use and failed to clean it up; unconditionally report it.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3354
3355void Verifier::visitTruncInst(TruncInst &I) {
3356 // Get the source and destination types
3357 Type *SrcTy = I.getOperand(0)->getType();
3358 Type *DestTy = I.getType();
3359
3360 // Get the size of the types in bits, we'll need this later
3361 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3362 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3363
3364 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3365 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3366 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3367 "trunc source and destination must both be a vector or neither", &I);
3368 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3369
3370 visitInstruction(I);
3371}
3372
3373void Verifier::visitZExtInst(ZExtInst &I) {
3374 // Get the source and destination types
3375 Type *SrcTy = I.getOperand(0)->getType();
3376 Type *DestTy = I.getType();
3377
3378 // Get the size of the types in bits, we'll need this later
3379 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3380 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3381 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3382 "zext source and destination must both be a vector or neither", &I);
3383 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3384 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3385
3386 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3387
3388 visitInstruction(I);
3389}
3390
3391void Verifier::visitSExtInst(SExtInst &I) {
3392 // Get the source and destination types
3393 Type *SrcTy = I.getOperand(0)->getType();
3394 Type *DestTy = I.getType();
3395
3396 // Get the size of the types in bits, we'll need this later
3397 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3398 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3399
3400 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3401 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3402 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3403 "sext source and destination must both be a vector or neither", &I);
3404 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3405
3406 visitInstruction(I);
3407}
3408
3409void Verifier::visitFPTruncInst(FPTruncInst &I) {
3410 // Get the source and destination types
3411 Type *SrcTy = I.getOperand(0)->getType();
3412 Type *DestTy = I.getType();
3413 // Get the size of the types in bits, we'll need this later
3414 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3415 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3416
3417 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3418 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3419 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3420 "fptrunc source and destination must both be a vector or neither", &I);
3421 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3422
3423 visitInstruction(I);
3424}
3425
3426void Verifier::visitFPExtInst(FPExtInst &I) {
3427 // Get the source and destination types
3428 Type *SrcTy = I.getOperand(0)->getType();
3429 Type *DestTy = I.getType();
3430
3431 // Get the size of the types in bits, we'll need this later
3432 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3433 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3434
3435 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3436 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3437 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3438 "fpext source and destination must both be a vector or neither", &I);
3439 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3440
3441 visitInstruction(I);
3442}
3443
3444void Verifier::visitUIToFPInst(UIToFPInst &I) {
3445 // Get the source and destination types
3446 Type *SrcTy = I.getOperand(0)->getType();
3447 Type *DestTy = I.getType();
3448
3449 bool SrcVec = SrcTy->isVectorTy();
3450 bool DstVec = DestTy->isVectorTy();
3451
3452 Check(SrcVec == DstVec,
3453 "UIToFP source and dest must both be vector or scalar", &I);
3454 Check(SrcTy->isIntOrIntVectorTy(),
3455 "UIToFP source must be integer or integer vector", &I);
3456 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3457 &I);
3458
3459 if (SrcVec && DstVec)
3460 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3461 cast<VectorType>(DestTy)->getElementCount(),
3462 "UIToFP source and dest vector length mismatch", &I);
3463
3464 visitInstruction(I);
3465}
3466
3467void Verifier::visitSIToFPInst(SIToFPInst &I) {
3468 // Get the source and destination types
3469 Type *SrcTy = I.getOperand(0)->getType();
3470 Type *DestTy = I.getType();
3471
3472 bool SrcVec = SrcTy->isVectorTy();
3473 bool DstVec = DestTy->isVectorTy();
3474
3475 Check(SrcVec == DstVec,
3476 "SIToFP source and dest must both be vector or scalar", &I);
3477 Check(SrcTy->isIntOrIntVectorTy(),
3478 "SIToFP source must be integer or integer vector", &I);
3479 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3480 &I);
3481
3482 if (SrcVec && DstVec)
3483 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3484 cast<VectorType>(DestTy)->getElementCount(),
3485 "SIToFP source and dest vector length mismatch", &I);
3486
3487 visitInstruction(I);
3488}
3489
3490void Verifier::visitFPToUIInst(FPToUIInst &I) {
3491 // Get the source and destination types
3492 Type *SrcTy = I.getOperand(0)->getType();
3493 Type *DestTy = I.getType();
3494
3495 bool SrcVec = SrcTy->isVectorTy();
3496 bool DstVec = DestTy->isVectorTy();
3497
3498 Check(SrcVec == DstVec,
3499 "FPToUI source and dest must both be vector or scalar", &I);
3500 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3501 Check(DestTy->isIntOrIntVectorTy(),
3502 "FPToUI result must be integer or integer vector", &I);
3503
3504 if (SrcVec && DstVec)
3505 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3506 cast<VectorType>(DestTy)->getElementCount(),
3507 "FPToUI source and dest vector length mismatch", &I);
3508
3509 visitInstruction(I);
3510}
3511
3512void Verifier::visitFPToSIInst(FPToSIInst &I) {
3513 // Get the source and destination types
3514 Type *SrcTy = I.getOperand(0)->getType();
3515 Type *DestTy = I.getType();
3516
3517 bool SrcVec = SrcTy->isVectorTy();
3518 bool DstVec = DestTy->isVectorTy();
3519
3520 Check(SrcVec == DstVec,
3521 "FPToSI source and dest must both be vector or scalar", &I);
3522 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3523 Check(DestTy->isIntOrIntVectorTy(),
3524 "FPToSI result must be integer or integer vector", &I);
3525
3526 if (SrcVec && DstVec)
3527 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3528 cast<VectorType>(DestTy)->getElementCount(),
3529 "FPToSI source and dest vector length mismatch", &I);
3530
3531 visitInstruction(I);
3532}
3533
3534void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3535 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3536 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3537 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3538 V);
3539
3540 if (SrcTy->isVectorTy()) {
3541 auto *VSrc = cast<VectorType>(SrcTy);
3542 auto *VDest = cast<VectorType>(DestTy);
3543 Check(VSrc->getElementCount() == VDest->getElementCount(),
3544 "PtrToAddr vector length mismatch", V);
3545 }
3546
3547 Type *AddrTy = DL.getAddressType(SrcTy);
3548 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3549}
3550
3551void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3552 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3553 visitInstruction(I);
3554}
3555
3556void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3557 // Get the source and destination types
3558 Type *SrcTy = I.getOperand(0)->getType();
3559 Type *DestTy = I.getType();
3560
3561 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3562
3563 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3564 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3565 &I);
3566
3567 if (SrcTy->isVectorTy()) {
3568 auto *VSrc = cast<VectorType>(SrcTy);
3569 auto *VDest = cast<VectorType>(DestTy);
3570 Check(VSrc->getElementCount() == VDest->getElementCount(),
3571 "PtrToInt Vector length mismatch", &I);
3572 }
3573
3574 visitInstruction(I);
3575}
3576
3577void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3578 // Get the source and destination types
3579 Type *SrcTy = I.getOperand(0)->getType();
3580 Type *DestTy = I.getType();
3581
3582 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3583 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3584
3585 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3586 &I);
3587 if (SrcTy->isVectorTy()) {
3588 auto *VSrc = cast<VectorType>(SrcTy);
3589 auto *VDest = cast<VectorType>(DestTy);
3590 Check(VSrc->getElementCount() == VDest->getElementCount(),
3591 "IntToPtr Vector length mismatch", &I);
3592 }
3593 visitInstruction(I);
3594}
3595
3596void Verifier::visitBitCastInst(BitCastInst &I) {
3597 Check(
3598 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3599 "Invalid bitcast", &I);
3600 visitInstruction(I);
3601}
3602
3603void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3604 Type *SrcTy = I.getOperand(0)->getType();
3605 Type *DestTy = I.getType();
3606
3607 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3608 &I);
3609 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3610 &I);
3612 "AddrSpaceCast must be between different address spaces", &I);
3613 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3614 Check(SrcVTy->getElementCount() ==
3615 cast<VectorType>(DestTy)->getElementCount(),
3616 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3617 visitInstruction(I);
3618}
3619
3620/// visitPHINode - Ensure that a PHI node is well formed.
3621///
3622void Verifier::visitPHINode(PHINode &PN) {
3623 // Ensure that the PHI nodes are all grouped together at the top of the block.
3624 // This can be tested by checking whether the instruction before this is
3625 // either nonexistent (because this is begin()) or is a PHI node. If not,
3626 // then there is some other instruction before a PHI.
3627 Check(&PN == &PN.getParent()->front() ||
3629 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3630
3631 // Check that a PHI doesn't yield a Token.
3632 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3633
3634 // Check that all of the values of the PHI node have the same type as the
3635 // result.
3636 for (Value *IncValue : PN.incoming_values()) {
3637 Check(PN.getType() == IncValue->getType(),
3638 "PHI node operands are not the same type as the result!", &PN);
3639 }
3640
3641 // All other PHI node constraints are checked in the visitBasicBlock method.
3642
3643 visitInstruction(PN);
3644}
3645
3646void Verifier::visitCallBase(CallBase &Call) {
3648 "Called function must be a pointer!", Call);
3649 FunctionType *FTy = Call.getFunctionType();
3650
3651 // Verify that the correct number of arguments are being passed
3652 if (FTy->isVarArg())
3653 Check(Call.arg_size() >= FTy->getNumParams(),
3654 "Called function requires more parameters than were provided!", Call);
3655 else
3656 Check(Call.arg_size() == FTy->getNumParams(),
3657 "Incorrect number of arguments passed to called function!", Call);
3658
3659 // Verify that all arguments to the call match the function type.
3660 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3661 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3662 "Call parameter type does not match function signature!",
3663 Call.getArgOperand(i), FTy->getParamType(i), Call);
3664
3665 AttributeList Attrs = Call.getAttributes();
3666
3667 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3668 "Attribute after last parameter!", Call);
3669
3670 Function *Callee =
3672 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3673 if (IsIntrinsic)
3674 Check(Callee->getValueType() == FTy,
3675 "Intrinsic called with incompatible signature", Call);
3676
3677 // Verify if the calling convention of the callee is callable.
3679 "calling convention does not permit calls", Call);
3680
3681 // Disallow passing/returning values with alignment higher than we can
3682 // represent.
3683 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3684 // necessary.
3685 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3686 if (!Ty->isSized())
3687 return;
3688 Align ABIAlign = DL.getABITypeAlign(Ty);
3689 Check(ABIAlign.value() <= Value::MaximumAlignment,
3690 "Incorrect alignment of " + Message + " to called function!", Call);
3691 };
3692
3693 if (!IsIntrinsic) {
3694 VerifyTypeAlign(FTy->getReturnType(), "return type");
3695 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3696 Type *Ty = FTy->getParamType(i);
3697 VerifyTypeAlign(Ty, "argument passed");
3698 }
3699 }
3700
3701 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3702 // Don't allow speculatable on call sites, unless the underlying function
3703 // declaration is also speculatable.
3704 Check(Callee && Callee->isSpeculatable(),
3705 "speculatable attribute may not apply to call sites", Call);
3706 }
3707
3708 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3709 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3710 "preallocated as a call site attribute can only be on "
3711 "llvm.call.preallocated.arg");
3712 }
3713
3714 // Verify call attributes.
3715 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3716
3717 // Conservatively check the inalloca argument.
3718 // We have a bug if we can find that there is an underlying alloca without
3719 // inalloca.
3720 if (Call.hasInAllocaArgument()) {
3721 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3722 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3723 Check(AI->isUsedWithInAlloca(),
3724 "inalloca argument for call has mismatched alloca", AI, Call);
3725 }
3726
3727 // For each argument of the callsite, if it has the swifterror argument,
3728 // make sure the underlying alloca/parameter it comes from has a swifterror as
3729 // well.
3730 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3731 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3732 Value *SwiftErrorArg = Call.getArgOperand(i);
3733 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3734 Check(AI->isSwiftError(),
3735 "swifterror argument for call has mismatched alloca", AI, Call);
3736 continue;
3737 }
3738 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3739 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3740 SwiftErrorArg, Call);
3741 Check(ArgI->hasSwiftErrorAttr(),
3742 "swifterror argument for call has mismatched parameter", ArgI,
3743 Call);
3744 }
3745
3746 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3747 // Don't allow immarg on call sites, unless the underlying declaration
3748 // also has the matching immarg.
3749 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3750 "immarg may not apply only to call sites", Call.getArgOperand(i),
3751 Call);
3752 }
3753
3754 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3755 Value *ArgVal = Call.getArgOperand(i);
3756 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3757 "immarg operand has non-immediate parameter", ArgVal, Call);
3758
3759 // If the imm-arg is an integer and also has a range attached,
3760 // check if the given value is within the range.
3761 if (Call.paramHasAttr(i, Attribute::Range)) {
3762 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3763 const ConstantRange &CR =
3764 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3765 Check(CR.contains(CI->getValue()),
3766 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3767 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3768 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3769 Call);
3770 }
3771 }
3772 }
3773
3774 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3775 Value *ArgVal = Call.getArgOperand(i);
3776 bool hasOB =
3778 bool isMustTail = Call.isMustTailCall();
3779 Check(hasOB != isMustTail,
3780 "preallocated operand either requires a preallocated bundle or "
3781 "the call to be musttail (but not both)",
3782 ArgVal, Call);
3783 }
3784 }
3785
3786 if (FTy->isVarArg()) {
3787 // FIXME? is 'nest' even legal here?
3788 bool SawNest = false;
3789 bool SawReturned = false;
3790
3791 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3792 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3793 SawNest = true;
3794 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3795 SawReturned = true;
3796 }
3797
3798 // Check attributes on the varargs part.
3799 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3800 Type *Ty = Call.getArgOperand(Idx)->getType();
3801 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3802 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3803
3804 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3805 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3806 SawNest = true;
3807 }
3808
3809 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3810 Check(!SawReturned, "More than one parameter has attribute returned!",
3811 Call);
3812 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3813 "Incompatible argument and return types for 'returned' "
3814 "attribute",
3815 Call);
3816 SawReturned = true;
3817 }
3818
3819 // Statepoint intrinsic is vararg but the wrapped function may be not.
3820 // Allow sret here and check the wrapped function in verifyStatepoint.
3821 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3822 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3823 "Attribute 'sret' cannot be used for vararg call arguments!",
3824 Call);
3825
3826 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3827 Check(Idx == Call.arg_size() - 1,
3828 "inalloca isn't on the last argument!", Call);
3829 }
3830 }
3831
3832 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3833 if (!IsIntrinsic) {
3834 for (Type *ParamTy : FTy->params()) {
3835 Check(!ParamTy->isMetadataTy(),
3836 "Function has metadata parameter but isn't an intrinsic", Call);
3837 Check(!ParamTy->isTokenLikeTy(),
3838 "Function has token parameter but isn't an intrinsic", Call);
3839 }
3840 }
3841
3842 // Verify that indirect calls don't return tokens.
3843 if (!Call.getCalledFunction()) {
3844 Check(!FTy->getReturnType()->isTokenLikeTy(),
3845 "Return type cannot be token for indirect call!");
3846 Check(!FTy->getReturnType()->isX86_AMXTy(),
3847 "Return type cannot be x86_amx for indirect call!");
3848 }
3849
3851 visitIntrinsicCall(ID, Call);
3852
3853 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3854 // most one "gc-transition", at most one "cfguardtarget", at most one
3855 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3856 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3857 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3858 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3859 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3860 FoundAttachedCallBundle = false;
3861 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3862 OperandBundleUse BU = Call.getOperandBundleAt(i);
3863 uint32_t Tag = BU.getTagID();
3864 if (Tag == LLVMContext::OB_deopt) {
3865 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3866 FoundDeoptBundle = true;
3867 } else if (Tag == LLVMContext::OB_gc_transition) {
3868 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3869 Call);
3870 FoundGCTransitionBundle = true;
3871 } else if (Tag == LLVMContext::OB_funclet) {
3872 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3873 FoundFuncletBundle = true;
3874 Check(BU.Inputs.size() == 1,
3875 "Expected exactly one funclet bundle operand", Call);
3876 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3877 "Funclet bundle operands should correspond to a FuncletPadInst",
3878 Call);
3879 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3880 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3881 Call);
3882 FoundCFGuardTargetBundle = true;
3883 Check(BU.Inputs.size() == 1,
3884 "Expected exactly one cfguardtarget bundle operand", Call);
3885 } else if (Tag == LLVMContext::OB_ptrauth) {
3886 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3887 FoundPtrauthBundle = true;
3888 Check(BU.Inputs.size() == 2,
3889 "Expected exactly two ptrauth bundle operands", Call);
3890 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3891 BU.Inputs[0]->getType()->isIntegerTy(32),
3892 "Ptrauth bundle key operand must be an i32 constant", Call);
3893 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3894 "Ptrauth bundle discriminator operand must be an i64", Call);
3895 } else if (Tag == LLVMContext::OB_kcfi) {
3896 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3897 FoundKCFIBundle = true;
3898 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3899 Call);
3900 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3901 BU.Inputs[0]->getType()->isIntegerTy(32),
3902 "Kcfi bundle operand must be an i32 constant", Call);
3903 } else if (Tag == LLVMContext::OB_preallocated) {
3904 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3905 Call);
3906 FoundPreallocatedBundle = true;
3907 Check(BU.Inputs.size() == 1,
3908 "Expected exactly one preallocated bundle operand", Call);
3909 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3910 Check(Input &&
3911 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3912 "\"preallocated\" argument must be a token from "
3913 "llvm.call.preallocated.setup",
3914 Call);
3915 } else if (Tag == LLVMContext::OB_gc_live) {
3916 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3917 FoundGCLiveBundle = true;
3919 Check(!FoundAttachedCallBundle,
3920 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3921 FoundAttachedCallBundle = true;
3922 verifyAttachedCallBundle(Call, BU);
3923 }
3924 }
3925
3926 // Verify that callee and callsite agree on whether to use pointer auth.
3927 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3928 "Direct call cannot have a ptrauth bundle", Call);
3929
3930 // Verify that each inlinable callsite of a debug-info-bearing function in a
3931 // debug-info-bearing function has a debug location attached to it. Failure to
3932 // do so causes assertion failures when the inliner sets up inline scope info
3933 // (Interposable functions are not inlinable, neither are functions without
3934 // definitions.)
3940 "inlinable function call in a function with "
3941 "debug info must have a !dbg location",
3942 Call);
3943
3944 if (Call.isInlineAsm())
3945 verifyInlineAsmCall(Call);
3946
3947 ConvergenceVerifyHelper.visit(Call);
3948
3949 visitInstruction(Call);
3950}
3951
3952void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3953 StringRef Context) {
3954 Check(!Attrs.contains(Attribute::InAlloca),
3955 Twine("inalloca attribute not allowed in ") + Context);
3956 Check(!Attrs.contains(Attribute::InReg),
3957 Twine("inreg attribute not allowed in ") + Context);
3958 Check(!Attrs.contains(Attribute::SwiftError),
3959 Twine("swifterror attribute not allowed in ") + Context);
3960 Check(!Attrs.contains(Attribute::Preallocated),
3961 Twine("preallocated attribute not allowed in ") + Context);
3962 Check(!Attrs.contains(Attribute::ByRef),
3963 Twine("byref attribute not allowed in ") + Context);
3964}
3965
3966/// Two types are "congruent" if they are identical, or if they are both pointer
3967/// types with different pointee types and the same address space.
3968static bool isTypeCongruent(Type *L, Type *R) {
3969 if (L == R)
3970 return true;
3973 if (!PL || !PR)
3974 return false;
3975 return PL->getAddressSpace() == PR->getAddressSpace();
3976}
3977
3978static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
3979 static const Attribute::AttrKind ABIAttrs[] = {
3980 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3981 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3982 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3983 Attribute::ByRef};
3984 AttrBuilder Copy(C);
3985 for (auto AK : ABIAttrs) {
3986 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3987 if (Attr.isValid())
3988 Copy.addAttribute(Attr);
3989 }
3990
3991 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3992 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3993 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3994 Attrs.hasParamAttr(I, Attribute::ByRef)))
3995 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3996 return Copy;
3997}
3998
3999void Verifier::verifyMustTailCall(CallInst &CI) {
4000 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4001
4002 Function *F = CI.getParent()->getParent();
4003 FunctionType *CallerTy = F->getFunctionType();
4004 FunctionType *CalleeTy = CI.getFunctionType();
4005 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4006 "cannot guarantee tail call due to mismatched varargs", &CI);
4007 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4008 "cannot guarantee tail call due to mismatched return types", &CI);
4009
4010 // - The calling conventions of the caller and callee must match.
4011 Check(F->getCallingConv() == CI.getCallingConv(),
4012 "cannot guarantee tail call due to mismatched calling conv", &CI);
4013
4014 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4015 // or a pointer bitcast followed by a ret instruction.
4016 // - The ret instruction must return the (possibly bitcasted) value
4017 // produced by the call or void.
4018 Value *RetVal = &CI;
4020
4021 // Handle the optional bitcast.
4022 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4023 Check(BI->getOperand(0) == RetVal,
4024 "bitcast following musttail call must use the call", BI);
4025 RetVal = BI;
4026 Next = BI->getNextNode();
4027 }
4028
4029 // Check the return.
4030 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4031 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4032 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4033 isa<UndefValue>(Ret->getReturnValue()),
4034 "musttail call result must be returned", Ret);
4035
4036 AttributeList CallerAttrs = F->getAttributes();
4037 AttributeList CalleeAttrs = CI.getAttributes();
4038 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4039 CI.getCallingConv() == CallingConv::Tail) {
4040 StringRef CCName =
4041 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4042
4043 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4044 // are allowed in swifttailcc call
4045 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4046 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4047 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4048 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4049 }
4050 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4051 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4052 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4053 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4054 }
4055 // - Varargs functions are not allowed
4056 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4057 " tail call for varargs function");
4058 return;
4059 }
4060
4061 // - The caller and callee prototypes must match. Pointer types of
4062 // parameters or return types may differ in pointee type, but not
4063 // address space.
4064 if (!CI.getIntrinsicID()) {
4065 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4066 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4067 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4068 Check(
4069 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4070 "cannot guarantee tail call due to mismatched parameter types", &CI);
4071 }
4072 }
4073
4074 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4075 // returned, preallocated, and inalloca, must match.
4076 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4077 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4078 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4079 Check(CallerABIAttrs == CalleeABIAttrs,
4080 "cannot guarantee tail call due to mismatched ABI impacting "
4081 "function attributes",
4082 &CI, CI.getOperand(I));
4083 }
4084}
4085
void Verifier::visitCallInst(CallInst &CI) {
  // Run the checks shared by all call-like instructions (arguments,
  // attributes, operand bundles, ...).
  visitCallBase(CI);

  // musttail calls carry additional constraints, verified separately.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4092
4093void Verifier::visitInvokeInst(InvokeInst &II) {
4094 visitCallBase(II);
4095
4096 // Verify that the first non-PHI instruction of the unwind destination is an
4097 // exception handling instruction.
4098 Check(
4099 II.getUnwindDest()->isEHPad(),
4100 "The unwind destination does not have an exception handling instruction!",
4101 &II);
4102
4103 visitTerminator(II);
4104}
4105
4106/// visitUnaryOperator - Check the argument to the unary operator.
4107///
4108void Verifier::visitUnaryOperator(UnaryOperator &U) {
4109 Check(U.getType() == U.getOperand(0)->getType(),
4110 "Unary operators must have same type for"
4111 "operands and result!",
4112 &U);
4113
4114 switch (U.getOpcode()) {
4115 // Check that floating-point arithmetic operators are only used with
4116 // floating-point operands.
4117 case Instruction::FNeg:
4118 Check(U.getType()->isFPOrFPVectorTy(),
4119 "FNeg operator only works with float types!", &U);
4120 break;
4121 default:
4122 llvm_unreachable("Unknown UnaryOperator opcode!");
4123 }
4124
4125 visitInstruction(U);
4126}
4127
4128/// visitBinaryOperator - Check that both arguments to the binary operator are
4129/// of the same type!
4130///
4131void Verifier::visitBinaryOperator(BinaryOperator &B) {
4132 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4133 "Both operands to a binary operator are not of the same type!", &B);
4134
4135 switch (B.getOpcode()) {
4136 // Check that integer arithmetic operators are only used with
4137 // integral operands.
4138 case Instruction::Add:
4139 case Instruction::Sub:
4140 case Instruction::Mul:
4141 case Instruction::SDiv:
4142 case Instruction::UDiv:
4143 case Instruction::SRem:
4144 case Instruction::URem:
4145 Check(B.getType()->isIntOrIntVectorTy(),
4146 "Integer arithmetic operators only work with integral types!", &B);
4147 Check(B.getType() == B.getOperand(0)->getType(),
4148 "Integer arithmetic operators must have same type "
4149 "for operands and result!",
4150 &B);
4151 break;
4152 // Check that floating-point arithmetic operators are only used with
4153 // floating-point operands.
4154 case Instruction::FAdd:
4155 case Instruction::FSub:
4156 case Instruction::FMul:
4157 case Instruction::FDiv:
4158 case Instruction::FRem:
4159 Check(B.getType()->isFPOrFPVectorTy(),
4160 "Floating-point arithmetic operators only work with "
4161 "floating-point types!",
4162 &B);
4163 Check(B.getType() == B.getOperand(0)->getType(),
4164 "Floating-point arithmetic operators must have same type "
4165 "for operands and result!",
4166 &B);
4167 break;
4168 // Check that logical operators are only used with integral operands.
4169 case Instruction::And:
4170 case Instruction::Or:
4171 case Instruction::Xor:
4172 Check(B.getType()->isIntOrIntVectorTy(),
4173 "Logical operators only work with integral types!", &B);
4174 Check(B.getType() == B.getOperand(0)->getType(),
4175 "Logical operators must have same type for operands and result!", &B);
4176 break;
4177 case Instruction::Shl:
4178 case Instruction::LShr:
4179 case Instruction::AShr:
4180 Check(B.getType()->isIntOrIntVectorTy(),
4181 "Shifts only work with integral types!", &B);
4182 Check(B.getType() == B.getOperand(0)->getType(),
4183 "Shift return type must be same as operands!", &B);
4184 break;
4185 default:
4186 llvm_unreachable("Unknown BinaryOperator opcode!");
4187 }
4188
4189 visitInstruction(B);
4190}
4191
4192void Verifier::visitICmpInst(ICmpInst &IC) {
4193 // Check that the operands are the same type
4194 Type *Op0Ty = IC.getOperand(0)->getType();
4195 Type *Op1Ty = IC.getOperand(1)->getType();
4196 Check(Op0Ty == Op1Ty,
4197 "Both operands to ICmp instruction are not of the same type!", &IC);
4198 // Check that the operands are the right type
4199 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4200 "Invalid operand types for ICmp instruction", &IC);
4201 // Check that the predicate is valid.
4202 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4203
4204 visitInstruction(IC);
4205}
4206
4207void Verifier::visitFCmpInst(FCmpInst &FC) {
4208 // Check that the operands are the same type
4209 Type *Op0Ty = FC.getOperand(0)->getType();
4210 Type *Op1Ty = FC.getOperand(1)->getType();
4211 Check(Op0Ty == Op1Ty,
4212 "Both operands to FCmp instruction are not of the same type!", &FC);
4213 // Check that the operands are the right type
4214 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4215 &FC);
4216 // Check that the predicate is valid.
4217 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4218
4219 visitInstruction(FC);
4220}
4221
4222void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4224 "Invalid extractelement operands!", &EI);
4225 visitInstruction(EI);
4226}
4227
4228void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4229 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4230 IE.getOperand(2)),
4231 "Invalid insertelement operands!", &IE);
4232 visitInstruction(IE);
4233}
4234
4235void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4237 SV.getShuffleMask()),
4238 "Invalid shufflevector operands!", &SV);
4239 visitInstruction(SV);
4240}
4241
4242void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4243 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4244
4245 Check(isa<PointerType>(TargetTy),
4246 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4247 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4248
4249 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4250 Check(!STy->isScalableTy(),
4251 "getelementptr cannot target structure that contains scalable vector"
4252 "type",
4253 &GEP);
4254 }
4255
4256 SmallVector<Value *, 16> Idxs(GEP.indices());
4257 Check(
4258 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4259 "GEP indexes must be integers", &GEP);
4260 Type *ElTy =
4261 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4262 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4263
4264 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4265
4266 Check(PtrTy && GEP.getResultElementType() == ElTy,
4267 "GEP is not of right type for indices!", &GEP, ElTy);
4268
4269 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4270 // Additional checks for vector GEPs.
4271 ElementCount GEPWidth = GEPVTy->getElementCount();
4272 if (GEP.getPointerOperandType()->isVectorTy())
4273 Check(
4274 GEPWidth ==
4275 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4276 "Vector GEP result width doesn't match operand's", &GEP);
4277 for (Value *Idx : Idxs) {
4278 Type *IndexTy = Idx->getType();
4279 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4280 ElementCount IndexWidth = IndexVTy->getElementCount();
4281 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4282 }
4283 Check(IndexTy->isIntOrIntVectorTy(),
4284 "All GEP indices should be of integer type");
4285 }
4286 }
4287
4288 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4289 "GEP address space doesn't match type", &GEP);
4290
4291 visitInstruction(GEP);
4292}
4293
4294static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4295 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4296}
4297
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
/// The operands come in [Low, High) pairs; pairs must be non-empty (except
/// for !absolute_symbol, which tolerates the full set), sorted, disjoint,
/// and non-contiguous (contiguous pairs should have been merged).
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands are consumed pairwise, so an odd count means a dangling bound.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Both bounds of each pair must be ConstantInts of the same width.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // !noalias.addrspace ranges describe address spaces, which are i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // An empty pair is always invalid; the full set is only legal for
    // !absolute_symbol.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Compare each pair against its predecessor: disjoint, signed-ordered,
      // and not mergeable into one range.
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two pairs, the last pair may wrap around and collide with
  // the first one, which the pairwise loop above cannot see.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4362
// Verify a !range attachment by delegating to verifyRangeLikeMetadata with
// the Range kind (which, unlike AbsoluteSymbol, rejects the full set).
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4368
// Verify a !noalias.addrspace attachment; the NoaliasAddrspace kind makes
// verifyRangeLikeMetadata require i32 bounds instead of the value's type.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4376
4377void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4378 unsigned Size = DL.getTypeSizeInBits(Ty);
4379 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4380 Check(!(Size & (Size - 1)),
4381 "atomic memory access' operand must have a power-of-two size", Ty, I);
4382}
4383
4384void Verifier::visitLoadInst(LoadInst &LI) {
4386 Check(PTy, "Load operand must be a pointer.", &LI);
4387 Type *ElTy = LI.getType();
4388 if (MaybeAlign A = LI.getAlign()) {
4389 Check(A->value() <= Value::MaximumAlignment,
4390 "huge alignment values are unsupported", &LI);
4391 }
4392 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4393 if (LI.isAtomic()) {
4394 Check(LI.getOrdering() != AtomicOrdering::Release &&
4395 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4396 "Load cannot have Release ordering", &LI);
4397 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4398 "atomic load operand must have integer, pointer, or floating point "
4399 "type!",
4400 ElTy, &LI);
4401 checkAtomicMemAccessSize(ElTy, &LI);
4402 } else {
4404 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4405 }
4406
4407 visitInstruction(LI);
4408}
4409
4410void Verifier::visitStoreInst(StoreInst &SI) {
4411 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4412 Check(PTy, "Store operand must be a pointer.", &SI);
4413 Type *ElTy = SI.getOperand(0)->getType();
4414 if (MaybeAlign A = SI.getAlign()) {
4415 Check(A->value() <= Value::MaximumAlignment,
4416 "huge alignment values are unsupported", &SI);
4417 }
4418 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4419 if (SI.isAtomic()) {
4420 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4421 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4422 "Store cannot have Acquire ordering", &SI);
4423 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4424 "atomic store operand must have integer, pointer, or floating point "
4425 "type!",
4426 ElTy, &SI);
4427 checkAtomicMemAccessSize(ElTy, &SI);
4428 } else {
4429 Check(SI.getSyncScopeID() == SyncScope::System,
4430 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4431 }
4432 visitInstruction(SI);
4433}
4434
/// Check that SwiftErrorVal is used as a swifterror argument in CS.
void Verifier::verifySwiftErrorCall(CallBase &Call,
                                    const Value *SwiftErrorVal) {
  // Scan every argument position; each one that passes the swifterror value
  // must carry the swifterror parameter attribute.
  for (const auto &I : llvm::enumerate(Call.args())) {
    if (I.value() == SwiftErrorVal) {
      Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
            "swifterror value when used in a callsite should be marked "
            "with swifterror attribute",
            SwiftErrorVal, Call);
    }
  }
}
4447
4448void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4449 // Check that swifterror value is only used by loads, stores, or as
4450 // a swifterror argument.
4451 for (const User *U : SwiftErrorVal->users()) {
4453 isa<InvokeInst>(U),
4454 "swifterror value can only be loaded and stored from, or "
4455 "as a swifterror argument!",
4456 SwiftErrorVal, U);
4457 // If it is used by a store, check it is the second operand.
4458 if (auto StoreI = dyn_cast<StoreInst>(U))
4459 Check(StoreI->getOperand(1) == SwiftErrorVal,
4460 "swifterror value should be the second operand when used "
4461 "by stores",
4462 SwiftErrorVal, U);
4463 if (auto *Call = dyn_cast<CallBase>(U))
4464 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4465 }
4466}
4467
4468void Verifier::visitAllocaInst(AllocaInst &AI) {
4469 Type *Ty = AI.getAllocatedType();
4470 SmallPtrSet<Type*, 4> Visited;
4471 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4472 // Check if it's a target extension type that disallows being used on the
4473 // stack.
4475 "Alloca has illegal target extension type", &AI);
4477 "Alloca array size must have integer type", &AI);
4478 if (MaybeAlign A = AI.getAlign()) {
4479 Check(A->value() <= Value::MaximumAlignment,
4480 "huge alignment values are unsupported", &AI);
4481 }
4482
4483 if (AI.isSwiftError()) {
4484 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4486 "swifterror alloca must not be array allocation", &AI);
4487 verifySwiftErrorValue(&AI);
4488 }
4489
4490 if (TT.isAMDGPU()) {
4492 "alloca on amdgpu must be in addrspace(5)", &AI);
4493 }
4494
4495 visitInstruction(AI);
4496}
4497
4498void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4499 Type *ElTy = CXI.getOperand(1)->getType();
4500 Check(ElTy->isIntOrPtrTy(),
4501 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4502 checkAtomicMemAccessSize(ElTy, &CXI);
4503 visitInstruction(CXI);
4504}
4505
4506void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4507 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4508 "atomicrmw instructions cannot be unordered.", &RMWI);
4509 auto Op = RMWI.getOperation();
4510 Type *ElTy = RMWI.getOperand(1)->getType();
4511 if (Op == AtomicRMWInst::Xchg) {
4512 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4513 ElTy->isPointerTy(),
4514 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4515 " operand must have integer or floating point type!",
4516 &RMWI, ElTy);
4517 } else if (AtomicRMWInst::isFPOperation(Op)) {
4519 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4520 " operand must have floating-point or fixed vector of floating-point "
4521 "type!",
4522 &RMWI, ElTy);
4523 } else {
4524 Check(ElTy->isIntegerTy(),
4525 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4526 " operand must have integer type!",
4527 &RMWI, ElTy);
4528 }
4529 checkAtomicMemAccessSize(ElTy, &RMWI);
4531 "Invalid binary operation!", &RMWI);
4532 visitInstruction(RMWI);
4533}
4534
4535void Verifier::visitFenceInst(FenceInst &FI) {
4536 const AtomicOrdering Ordering = FI.getOrdering();
4537 Check(Ordering == AtomicOrdering::Acquire ||
4538 Ordering == AtomicOrdering::Release ||
4539 Ordering == AtomicOrdering::AcquireRelease ||
4540 Ordering == AtomicOrdering::SequentiallyConsistent,
4541 "fence instructions may only have acquire, release, acq_rel, or "
4542 "seq_cst ordering.",
4543 &FI);
4544 visitInstruction(FI);
4545}
4546
4547void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4549 EVI.getIndices()) == EVI.getType(),
4550 "Invalid ExtractValueInst operands!", &EVI);
4551
4552 visitInstruction(EVI);
4553}
4554
4555void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4557 IVI.getIndices()) ==
4558 IVI.getOperand(1)->getType(),
4559 "Invalid InsertValueInst operands!", &IVI);
4560
4561 visitInstruction(IVI);
4562}
4563
4564static Value *getParentPad(Value *EHPad) {
4565 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4566 return FPI->getParentPad();
4567
4568 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4569}
4570
/// Verify that every predecessor of the block containing EH pad \p I reaches
/// it through a legal edge: landingpads only via invoke unwind edges,
/// catchpads only via their catchswitch, and funclet pads only via unwind
/// edges whose chain of exited pads is well-nested and acyclic.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad block is reachable only from its owning catchswitch, and the
    // catchswitch must not name one of its own handlers as its unwind dest.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Most intrinsics cannot throw, so a nounwind intrinsic invoke is
      // exempt from the funclet-edge bookkeeping below.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      // The "funclet" bundle names the pad the invoke executes within; with
      // no bundle the invoke runs at function scope (token none).
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    // Walk up the parent-pad chain from the source until we hit ToPad's
    // parent (legal) or prove the edge is malformed.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4653
4654void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4655 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4656 // isn't a cleanup.
4657 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4658 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4659
4660 visitEHPadPredecessors(LPI);
4661
4662 if (!LandingPadResultTy)
4663 LandingPadResultTy = LPI.getType();
4664 else
4665 Check(LandingPadResultTy == LPI.getType(),
4666 "The landingpad instruction should have a consistent result type "
4667 "inside a function.",
4668 &LPI);
4669
4670 Function *F = LPI.getParent()->getParent();
4671 Check(F->hasPersonalityFn(),
4672 "LandingPadInst needs to be in a function with a personality.", &LPI);
4673
4674 // The landingpad instruction must be the first non-PHI instruction in the
4675 // block.
4676 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4677 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4678
4679 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4680 Constant *Clause = LPI.getClause(i);
4681 if (LPI.isCatch(i)) {
4682 Check(isa<PointerType>(Clause->getType()),
4683 "Catch operand does not have pointer type!", &LPI);
4684 } else {
4685 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4687 "Filter operand is not an array of constants!", &LPI);
4688 }
4689 }
4690
4691 visitInstruction(LPI);
4692}
4693
4694void Verifier::visitResumeInst(ResumeInst &RI) {
4696 "ResumeInst needs to be in a function with a personality.", &RI);
4697
4698 if (!LandingPadResultTy)
4699 LandingPadResultTy = RI.getValue()->getType();
4700 else
4701 Check(LandingPadResultTy == RI.getValue()->getType(),
4702 "The resume instruction should have a consistent result type "
4703 "inside a function.",
4704 &RI);
4705
4706 visitTerminator(RI);
4707}
4708
4709void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4710 BasicBlock *BB = CPI.getParent();
4711
4712 Function *F = BB->getParent();
4713 Check(F->hasPersonalityFn(),
4714 "CatchPadInst needs to be in a function with a personality.", &CPI);
4715
4717 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4718 CPI.getParentPad());
4719
4720 // The catchpad instruction must be the first non-PHI instruction in the
4721 // block.
4722 Check(&*BB->getFirstNonPHIIt() == &CPI,
4723 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4724
4725 visitEHPadPredecessors(CPI);
4726 visitFuncletPadInst(CPI);
4727}
4728
4729void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4730 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4731 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4732 CatchReturn.getOperand(0));
4733
4734 visitTerminator(CatchReturn);
4735}
4736
4737void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4738 BasicBlock *BB = CPI.getParent();
4739
4740 Function *F = BB->getParent();
4741 Check(F->hasPersonalityFn(),
4742 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4743
4744 // The cleanuppad instruction must be the first non-PHI instruction in the
4745 // block.
4746 Check(&*BB->getFirstNonPHIIt() == &CPI,
4747 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4748
4749 auto *ParentPad = CPI.getParentPad();
4750 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4751 "CleanupPadInst has an invalid parent.", &CPI);
4752
4753 visitEHPadPredecessors(CPI);
4754 visitFuncletPadInst(CPI);
4755}
4756
/// Verify the unwind-destination consistency rules for a funclet pad: every
/// unwind edge that exits \p FPI must agree on a single destination, and for
/// a catchpad that destination must match the parent catchswitch's unwind
/// dest. Nested cleanup pads are explored via a worklist because their unwind
/// destination is only discoverable through their uses.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First FPI-exiting unwind edge found, and the pad/token it unwinds to;
  // every later exiting edge must match FirstUnwindPad.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // UnwindPad is where this use unwinds to; ExitsFPI records whether that
      // edge leaves FPI itself (as opposed to only leaving a nested pad).
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // For a catchpad, the unwind dest of exiting edges must agree with the
    // unwind dest of the owning catchswitch (or "caller" if it has none).
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4916
/// Verify structural rules for a catchswitch: personality required, first
/// non-PHI in its block, valid parent pad, a non-landingpad EH unwind dest,
/// and at least one handler, each of which must begin with a catchpad.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  // The parent token is either "none" (function scope) or a funclet pad.
  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
4958
4959void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4961 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4962 CRI.getOperand(0));
4963
4964 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4965 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4966 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4967 "CleanupReturnInst must unwind to an EH block which is not a "
4968 "landingpad.",
4969 &CRI);
4970 }
4971
4972 visitTerminator(CRI);
4973}
4974
4975void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4976 Instruction *Op = cast<Instruction>(I.getOperand(i));
4977 // If the we have an invalid invoke, don't try to compute the dominance.
4978 // We already reject it in the invoke specific checks and the dominance
4979 // computation doesn't handle multiple edges.
4980 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4981 if (II->getNormalDest() == II->getUnwindDest())
4982 return;
4983 }
4984
4985 // Quick check whether the def has already been encountered in the same block.
4986 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4987 // uses are defined to happen on the incoming edge, not at the instruction.
4988 //
4989 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4990 // wrapping an SSA value, assert that we've already encountered it. See
4991 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4992 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4993 return;
4994
4995 const Use &U = I.getOperandUse(i);
4996 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4997}
4998
4999void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5000 Check(I.getType()->isPointerTy(),
5001 "dereferenceable, dereferenceable_or_null "
5002 "apply only to pointer types",
5003 &I);
5005 "dereferenceable, dereferenceable_or_null apply only to load"
5006 " and inttoptr instructions, use attributes for calls or invokes",
5007 &I);
5008 Check(MD->getNumOperands() == 1,
5009 "dereferenceable, dereferenceable_or_null "
5010 "take one operand!",
5011 &I);
5012 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5013 Check(CI && CI->getType()->isIntegerTy(64),
5014 "dereferenceable, "
5015 "dereferenceable_or_null metadata value must be an i64!",
5016 &I);
5017}
5018
5019void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5020 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5021 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5022 &I);
5023 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5024}
5025
5026void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5027 auto GetBranchingTerminatorNumOperands = [&]() {
5028 unsigned ExpectedNumOperands = 0;
5029 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5030 ExpectedNumOperands = BI->getNumSuccessors();
5031 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5032 ExpectedNumOperands = SI->getNumSuccessors();
5033 else if (isa<CallInst>(&I))
5034 ExpectedNumOperands = 1;
5035 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5036 ExpectedNumOperands = IBI->getNumDestinations();
5037 else if (isa<SelectInst>(&I))
5038 ExpectedNumOperands = 2;
5039 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5040 ExpectedNumOperands = CI->getNumSuccessors();
5041 return ExpectedNumOperands;
5042 };
5043 Check(MD->getNumOperands() >= 1,
5044 "!prof annotations should have at least 1 operand", MD);
5045 // Check first operand.
5046 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5048 "expected string with name of the !prof annotation", MD);
5049 MDString *MDS = cast<MDString>(MD->getOperand(0));
5050 StringRef ProfName = MDS->getString();
5051
5053 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5054 "'unknown' !prof should only appear on instructions on which "
5055 "'branch_weights' would",
5056 MD);
5057 Check(MD->getNumOperands() == 1,
5058 "'unknown' !prof should have no additional operands", MD);
5059 return;
5060 }
5061
5062 Check(MD->getNumOperands() >= 2,
5063 "!prof annotations should have no less than 2 operands", MD);
5064
5065 // Check consistency of !prof branch_weights metadata.
5066 if (ProfName == MDProfLabels::BranchWeights) {
5067 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5068 if (isa<InvokeInst>(&I)) {
5069 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5070 "Wrong number of InvokeInst branch_weights operands", MD);
5071 } else {
5072 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5073 if (ExpectedNumOperands == 0)
5074 CheckFailed("!prof branch_weights are not allowed for this instruction",
5075 MD);
5076
5077 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5078 MD);
5079 }
5080 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5081 ++i) {
5082 auto &MDO = MD->getOperand(i);
5083 Check(MDO, "second operand should not be null", MD);
5085 "!prof brunch_weights operand is not a const int");
5086 }
5087 } else if (ProfName == MDProfLabels::ValueProfile) {
5088 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5089 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5090 Check(KindInt, "VP !prof missing kind argument", MD);
5091
5092 auto Kind = KindInt->getZExtValue();
5093 Check(Kind >= InstrProfValueKind::IPVK_First &&
5094 Kind <= InstrProfValueKind::IPVK_Last,
5095 "Invalid VP !prof kind", MD);
5096 Check(MD->getNumOperands() % 2 == 1,
5097 "VP !prof should have an even number "
5098 "of arguments after 'VP'",
5099 MD);
5100 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5101 Kind == InstrProfValueKind::IPVK_MemOPSize)
5103 "VP !prof indirect call or memop size expected to be applied to "
5104 "CallBase instructions only",
5105 MD);
5106 } else {
5107 CheckFailed("expected either branch_weights or VP profile name", MD);
5108 }
5109}
5110
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  // Caller guarantees the attachment exists; this visitor is only dispatched
  // for instructions carrying !DIAssignID.
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Non-intrinsic debug records referencing this ID must likewise be
  // assign-style records living in the same function as I.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5142
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag, so it must be a tuple whose operands are each tags.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5160
// Shared helper for !memprof MIB stacks and !callsite attachments.
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  // Each frame in the stack must be encoded as a constant integer operand.
  for (const auto &Op : MD->operands())
          "call stack metadata operand should be constant integer", Op);
}
5171
5172void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5173 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5174 Check(MD->getNumOperands() >= 1,
5175 "!memprof annotations should have at least 1 metadata operand "
5176 "(MemInfoBlock)",
5177 MD);
5178
5179 // Check each MIB
5180 for (auto &MIBOp : MD->operands()) {
5181 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5182 // The first operand of an MIB should be the call stack metadata.
5183 // There rest of the operands should be MDString tags, and there should be
5184 // at least one.
5185 Check(MIB->getNumOperands() >= 2,
5186 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5187
5188 // Check call stack metadata (first operand).
5189 Check(MIB->getOperand(0) != nullptr,
5190 "!memprof MemInfoBlock first operand should not be null", MIB);
5191 Check(isa<MDNode>(MIB->getOperand(0)),
5192 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5193 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5194 visitCallStackMetadata(StackMD);
5195
5196 // The next set of 1 or more operands should be MDString.
5197 unsigned I = 1;
5198 for (; I < MIB->getNumOperands(); ++I) {
5199 if (!isa<MDString>(MIB->getOperand(I))) {
5200 Check(I > 1,
5201 "!memprof MemInfoBlock second operand should be an MDString",
5202 MIB);
5203 break;
5204 }
5205 }
5206
5207 // Any remaining should be MDNode that are pairs of integers
5208 for (; I < MIB->getNumOperands(); ++I) {
5209 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5210 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5211 MIB);
5212 Check(OpNode->getNumOperands() == 2,
5213 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5214 "operands",
5215 MIB);
5216 // Check that all of Op's operands are ConstantInt.
5217 Check(llvm::all_of(OpNode->operands(),
5218 [](const MDOperand &Op) {
5219 return mdconst::hasa<ConstantInt>(Op);
5220 }),
5221 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5222 "ConstantInt operands",
5223 MIB);
5224 }
5225 }
5226}
5227
// !callsite is attached by the memprof matcher and is only meaningful on
// call instructions.
void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
  // Verify the partial callstack annotated from memprof profiles. This callsite
  // is a part of a profiled allocation callstack.
  visitCallStackMetadata(MD);
}
5234
5235static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5236 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5237 return isa<ConstantInt>(VAL->getValue());
5238 return false;
5239}
5240
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  // Each operand names one possible callee type for this (indirect) call.
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    // A generalized type metadata node is an (offset, type-id string) pair.
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // For function types the offset (first operand) is always zero.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5261
5262void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5263 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5264 Check(Annotation->getNumOperands() >= 1,
5265 "annotation must have at least one operand");
5266 for (const MDOperand &Op : Annotation->operands()) {
5267 bool TupleOfStrings =
5268 isa<MDTuple>(Op.get()) &&
5269 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5270 return isa<MDString>(Annotation.get());
5271 });
5272 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5273 "operands must be a string or a tuple of strings");
5274 }
5275}
5276
// Verify a single alias scope node: !{ self-ref | string, domain [, string] }.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  // The first operand identifies the scope: either the node itself (a
  // distinct self-reference) or a string id.
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
          "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // A domain has the shape !{ self-ref | string [, name string] }.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5300
5301void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5302 for (const MDOperand &Op : MD->operands()) {
5303 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5304 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5305 visitAliasScopeMetadata(OpMD);
5306 }
5307}
5308
5309void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5310 auto IsValidAccessScope = [](const MDNode *MD) {
5311 return MD->getNumOperands() == 0 && MD->isDistinct();
5312 };
5313
5314 // It must be either an access scope itself...
5315 if (IsValidAccessScope(MD))
5316 return;
5317
5318 // ...or a list of access scopes.
5319 for (const MDOperand &Op : MD->operands()) {
5320 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5321 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5322 Check(IsValidAccessScope(OpMD),
5323 "Access scope list contains invalid access scope", MD);
5324 }
5325}
5326
/// verifyInstruction - Verify that an instruction is well formed.
///
/// This is the catch-all per-instruction visitor: structural SSA checks
/// (parent block, self-reference, operand provenance) followed by validation
/// of every recognized metadata attachment kind.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated in unreachable code, which is not in SSA
      // form; hence the reachability qualifier.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Validate each operand's kind and provenance (same function/module etc.).
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only this allow-list of intrinsics may be invoked (rather than
      // called); everything else must appear as a direct CallInst callee.
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the callee of this very call.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  // The remainder of this function validates each known metadata attachment.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);

  // !noalias and !alias.scope share the same scope-list structure.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Generic walk over every attachment; debug locations are only permitted
  // inside !dbg and !llvm.loop.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Remember that this instruction has been visited in the current block.
  InstsInThisBlock.insert(&I);
}
5581
5582/// Allow intrinsics to be verified in different ways.
5583void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5585 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5586 IF);
5587
5588 // Verify that the intrinsic prototype lines up with what the .td files
5589 // describe.
5590 FunctionType *IFTy = IF->getFunctionType();
5591 bool IsVarArg = IFTy->isVarArg();
5592
5596
5597 // Walk the descriptors to extract overloaded types.
5602 "Intrinsic has incorrect return type!", IF);
5604 "Intrinsic has incorrect argument type!", IF);
5605
5606 // Verify if the intrinsic call matches the vararg property.
5607 if (IsVarArg)
5609 "Intrinsic was not defined with variable arguments!", IF);
5610 else
5612 "Callsite was not defined with variable arguments!", IF);
5613
5614 // All descriptors should be absorbed by now.
5615 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5616
5617 // Now that we have the intrinsic ID and the actual argument types (and we
5618 // know they are legal for the intrinsic!) get the intrinsic name through the
5619 // usual means. This allows us to verify the mangling of argument types into
5620 // the name.
5621 const std::string ExpectedName =
5622 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5623 Check(ExpectedName == IF->getName(),
5624 "Intrinsic name not mangled correctly for type arguments! "
5625 "Should be: " +
5626 ExpectedName,
5627 IF);
5628
5629 // If the intrinsic takes MDNode arguments, verify that they are either global
5630 // or are local to *this* function.
5631 for (Value *V : Call.args()) {
5632 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5633 visitMetadataAsValue(*MD, Call.getCaller());
5634 if (auto *Const = dyn_cast<Constant>(V))
5635 Check(!Const->getType()->isX86_AMXTy(),
5636 "const x86_amx is not allowed in argument!");
5637 }
5638
5639 switch (ID) {
5640 default:
5641 break;
5642 case Intrinsic::assume: {
5643 for (auto &Elem : Call.bundle_op_infos()) {
5644 unsigned ArgCount = Elem.End - Elem.Begin;
5645 // Separate storage assumptions are special insofar as they're the only
5646 // operand bundles allowed on assumes that aren't parameter attributes.
5647 if (Elem.Tag->getKey() == "separate_storage") {
5648 Check(ArgCount == 2,
5649 "separate_storage assumptions should have 2 arguments", Call);
5650 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5651 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5652 "arguments to separate_storage assumptions should be pointers",
5653 Call);
5654 continue;
5655 }
5656 Check(Elem.Tag->getKey() == "ignore" ||
5657 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5658 "tags must be valid attribute names", Call);
5659 Attribute::AttrKind Kind =
5660 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5661 if (Kind == Attribute::Alignment) {
5662 Check(ArgCount <= 3 && ArgCount >= 2,
5663 "alignment assumptions should have 2 or 3 arguments", Call);
5664 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5665 "first argument should be a pointer", Call);
5666 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5667 "second argument should be an integer", Call);
5668 if (ArgCount == 3)
5669 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5670 "third argument should be an integer if present", Call);
5671 continue;
5672 }
5673 if (Kind == Attribute::Dereferenceable) {
5674 Check(ArgCount == 2,
5675 "dereferenceable assumptions should have 2 arguments", Call);
5676 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5677 "first argument should be a pointer", Call);
5678 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5679 "second argument should be an integer", Call);
5680 continue;
5681 }
5682 Check(ArgCount <= 2, "too many arguments", Call);
5683 if (Kind == Attribute::None)
5684 break;
5685 if (Attribute::isIntAttrKind(Kind)) {
5686 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5687 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5688 "the second argument should be a constant integral value", Call);
5689 } else if (Attribute::canUseAsParamAttr(Kind)) {
5690 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5691 } else if (Attribute::canUseAsFnAttr(Kind)) {
5692 Check((ArgCount) == 0, "this attribute has no argument", Call);
5693 }
5694 }
5695 break;
5696 }
5697 case Intrinsic::ucmp:
5698 case Intrinsic::scmp: {
5699 Type *SrcTy = Call.getOperand(0)->getType();
5700 Type *DestTy = Call.getType();
5701
5702 Check(DestTy->getScalarSizeInBits() >= 2,
5703 "result type must be at least 2 bits wide", Call);
5704
5705 bool IsDestTypeVector = DestTy->isVectorTy();
5706 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5707 "ucmp/scmp argument and result types must both be either vector or "
5708 "scalar types",
5709 Call);
5710 if (IsDestTypeVector) {
5711 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5712 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5713 Check(SrcVecLen == DestVecLen,
5714 "return type and arguments must have the same number of "
5715 "elements",
5716 Call);
5717 }
5718 break;
5719 }
5720 case Intrinsic::coro_id: {
5721 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5722 if (isa<ConstantPointerNull>(InfoArg))
5723 break;
5724 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5725 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5726 "info argument of llvm.coro.id must refer to an initialized "
5727 "constant");
5728 Constant *Init = GV->getInitializer();
5730 "info argument of llvm.coro.id must refer to either a struct or "
5731 "an array");
5732 break;
5733 }
5734 case Intrinsic::is_fpclass: {
5735 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5736 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5737 "unsupported bits for llvm.is.fpclass test mask");
5738 break;
5739 }
5740 case Intrinsic::fptrunc_round: {
5741 // Check the rounding mode
5742 Metadata *MD = nullptr;
5744 if (MAV)
5745 MD = MAV->getMetadata();
5746
5747 Check(MD != nullptr, "missing rounding mode argument", Call);
5748
5749 Check(isa<MDString>(MD),
5750 ("invalid value for llvm.fptrunc.round metadata operand"
5751 " (the operand should be a string)"),
5752 MD);
5753
5754 std::optional<RoundingMode> RoundMode =
5755 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5756 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5757 "unsupported rounding mode argument", Call);
5758 break;
5759 }
5760#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5761#include "llvm/IR/VPIntrinsics.def"
5762#undef BEGIN_REGISTER_VP_INTRINSIC
5763 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5764 break;
5765#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5766 case Intrinsic::INTRINSIC:
5767#include "llvm/IR/ConstrainedOps.def"
5768#undef INSTRUCTION
5769 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5770 break;
5771 case Intrinsic::dbg_declare: // llvm.dbg.declare
5772 case Intrinsic::dbg_value: // llvm.dbg.value
5773 case Intrinsic::dbg_assign: // llvm.dbg.assign
5774 case Intrinsic::dbg_label: // llvm.dbg.label
    // We no longer interpret debug intrinsics (the old variable-location
    // design). They're meaningless as far as LLVM is concerned; we could make
    // it an error for them to appear, but it's possible we'll have users
    // converting back to intrinsics for the foreseeable future (such as DXIL),
    // so tolerate their existence.
5780 break;
5781 case Intrinsic::memcpy:
5782 case Intrinsic::memcpy_inline:
5783 case Intrinsic::memmove:
5784 case Intrinsic::memset:
5785 case Intrinsic::memset_inline:
5786 break;
5787 case Intrinsic::experimental_memset_pattern: {
5788 const auto Memset = cast<MemSetPatternInst>(&Call);
5789 Check(Memset->getValue()->getType()->isSized(),
5790 "unsized types cannot be used as memset patterns", Call);
5791 break;
5792 }
5793 case Intrinsic::memcpy_element_unordered_atomic:
5794 case Intrinsic::memmove_element_unordered_atomic:
5795 case Intrinsic::memset_element_unordered_atomic: {
5796 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5797
5798 ConstantInt *ElementSizeCI =
5799 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5800 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5801 Check(ElementSizeVal.isPowerOf2(),
5802 "element size of the element-wise atomic memory intrinsic "
5803 "must be a power of 2",
5804 Call);
5805
5806 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5807 return Alignment && ElementSizeVal.ule(Alignment->value());
5808 };
5809 Check(IsValidAlignment(AMI->getDestAlign()),
5810 "incorrect alignment of the destination argument", Call);
5811 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5812 Check(IsValidAlignment(AMT->getSourceAlign()),
5813 "incorrect alignment of the source argument", Call);
5814 }
5815 break;
5816 }
5817 case Intrinsic::call_preallocated_setup: {
5818 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5819 Check(NumArgs != nullptr,
5820 "llvm.call.preallocated.setup argument must be a constant");
5821 bool FoundCall = false;
5822 for (User *U : Call.users()) {
5823 auto *UseCall = dyn_cast<CallBase>(U);
5824 Check(UseCall != nullptr,
5825 "Uses of llvm.call.preallocated.setup must be calls");
5826 Intrinsic::ID IID = UseCall->getIntrinsicID();
5827 if (IID == Intrinsic::call_preallocated_arg) {
5828 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5829 Check(AllocArgIndex != nullptr,
5830 "llvm.call.preallocated.alloc arg index must be a constant");
5831 auto AllocArgIndexInt = AllocArgIndex->getValue();
5832 Check(AllocArgIndexInt.sge(0) &&
5833 AllocArgIndexInt.slt(NumArgs->getValue()),
5834 "llvm.call.preallocated.alloc arg index must be between 0 and "
5835 "corresponding "
5836 "llvm.call.preallocated.setup's argument count");
5837 } else if (IID == Intrinsic::call_preallocated_teardown) {
5838 // nothing to do
5839 } else {
5840 Check(!FoundCall, "Can have at most one call corresponding to a "
5841 "llvm.call.preallocated.setup");
5842 FoundCall = true;
5843 size_t NumPreallocatedArgs = 0;
5844 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5845 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5846 ++NumPreallocatedArgs;
5847 }
5848 }
5849 Check(NumPreallocatedArgs != 0,
5850 "cannot use preallocated intrinsics on a call without "
5851 "preallocated arguments");
5852 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5853 "llvm.call.preallocated.setup arg size must be equal to number "
5854 "of preallocated arguments "
5855 "at call site",
5856 Call, *UseCall);
5857 // getOperandBundle() cannot be called if more than one of the operand
5858 // bundle exists. There is already a check elsewhere for this, so skip
5859 // here if we see more than one.
5860 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5861 1) {
5862 return;
5863 }
5864 auto PreallocatedBundle =
5865 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5866 Check(PreallocatedBundle,
5867 "Use of llvm.call.preallocated.setup outside intrinsics "
5868 "must be in \"preallocated\" operand bundle");
5869 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5870 "preallocated bundle must have token from corresponding "
5871 "llvm.call.preallocated.setup");
5872 }
5873 }
5874 break;
5875 }
5876 case Intrinsic::call_preallocated_arg: {
5877 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5878 Check(Token &&
5879 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5880 "llvm.call.preallocated.arg token argument must be a "
5881 "llvm.call.preallocated.setup");
5882 Check(Call.hasFnAttr(Attribute::Preallocated),
5883 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5884 "call site attribute");
5885 break;
5886 }
5887 case Intrinsic::call_preallocated_teardown: {
5888 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5889 Check(Token &&
5890 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5891 "llvm.call.preallocated.teardown token argument must be a "
5892 "llvm.call.preallocated.setup");
5893 break;
5894 }
5895 case Intrinsic::gcroot:
5896 case Intrinsic::gcwrite:
5897 case Intrinsic::gcread:
5898 if (ID == Intrinsic::gcroot) {
5899 AllocaInst *AI =
5901 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5903 "llvm.gcroot parameter #2 must be a constant.", Call);
5904 if (!AI->getAllocatedType()->isPointerTy()) {
5906 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5907 "or argument #2 must be a non-null constant.",
5908 Call);
5909 }
5910 }
5911
5912 Check(Call.getParent()->getParent()->hasGC(),
5913 "Enclosing function does not use GC.", Call);
5914 break;
5915 case Intrinsic::init_trampoline:
5917 "llvm.init_trampoline parameter #2 must resolve to a function.",
5918 Call);
5919 break;
5920 case Intrinsic::prefetch:
5921 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5922 "rw argument to llvm.prefetch must be 0-1", Call);
5923 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5924 "locality argument to llvm.prefetch must be 0-3", Call);
5925 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5926 "cache type argument to llvm.prefetch must be 0-1", Call);
5927 break;
5928 case Intrinsic::stackprotector:
5930 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5931 break;
5932 case Intrinsic::localescape: {
5933 BasicBlock *BB = Call.getParent();
5934 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5935 Call);
5936 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5937 Call);
5938 for (Value *Arg : Call.args()) {
5939 if (isa<ConstantPointerNull>(Arg))
5940 continue; // Null values are allowed as placeholders.
5941 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5942 Check(AI && AI->isStaticAlloca(),
5943 "llvm.localescape only accepts static allocas", Call);
5944 }
5945 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5946 SawFrameEscape = true;
5947 break;
5948 }
5949 case Intrinsic::localrecover: {
5951 Function *Fn = dyn_cast<Function>(FnArg);
5952 Check(Fn && !Fn->isDeclaration(),
5953 "llvm.localrecover first "
5954 "argument must be function defined in this module",
5955 Call);
5956 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5957 auto &Entry = FrameEscapeInfo[Fn];
5958 Entry.second = unsigned(
5959 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5960 break;
5961 }
5962
5963 case Intrinsic::experimental_gc_statepoint:
5964 if (auto *CI = dyn_cast<CallInst>(&Call))
5965 Check(!CI->isInlineAsm(),
5966 "gc.statepoint support for inline assembly unimplemented", CI);
5967 Check(Call.getParent()->getParent()->hasGC(),
5968 "Enclosing function does not use GC.", Call);
5969
5970 verifyStatepoint(Call);
5971 break;
5972 case Intrinsic::experimental_gc_result: {
5973 Check(Call.getParent()->getParent()->hasGC(),
5974 "Enclosing function does not use GC.", Call);
5975
5976 auto *Statepoint = Call.getArgOperand(0);
5977 if (isa<UndefValue>(Statepoint))
5978 break;
5979
5980 // Are we tied to a statepoint properly?
5981 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5982 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
5983 Intrinsic::experimental_gc_statepoint,
5984 "gc.result operand #1 must be from a statepoint", Call,
5985 Call.getArgOperand(0));
5986
5987 // Check that result type matches wrapped callee.
5988 auto *TargetFuncType =
5989 cast<FunctionType>(StatepointCall->getParamElementType(2));
5990 Check(Call.getType() == TargetFuncType->getReturnType(),
5991 "gc.result result type does not match wrapped callee", Call);
5992 break;
5993 }
5994 case Intrinsic::experimental_gc_relocate: {
5995 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5996
5998 "gc.relocate must return a pointer or a vector of pointers", Call);
5999
6000 // Check that this relocate is correctly tied to the statepoint
6001
6002 // This is case for relocate on the unwinding path of an invoke statepoint
6003 if (LandingPadInst *LandingPad =
6005
6006 const BasicBlock *InvokeBB =
6007 LandingPad->getParent()->getUniquePredecessor();
6008
6009 // Landingpad relocates should have only one predecessor with invoke
6010 // statepoint terminator
6011 Check(InvokeBB, "safepoints should have unique landingpads",
6012 LandingPad->getParent());
6013 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6014 InvokeBB);
6016 "gc relocate should be linked to a statepoint", InvokeBB);
6017 } else {
6018 // In all other cases relocate should be tied to the statepoint directly.
6019 // This covers relocates on a normal return path of invoke statepoint and
6020 // relocates of a call statepoint.
6021 auto *Token = Call.getArgOperand(0);
6023 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6024 }
6025
6026 // Verify rest of the relocate arguments.
6027 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6028
6029 // Both the base and derived must be piped through the safepoint.
6032 "gc.relocate operand #2 must be integer offset", Call);
6033
6034 Value *Derived = Call.getArgOperand(2);
6035 Check(isa<ConstantInt>(Derived),
6036 "gc.relocate operand #3 must be integer offset", Call);
6037
6038 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6039 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6040
6041 // Check the bounds
6042 if (isa<UndefValue>(StatepointCall))
6043 break;
6044 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6045 .getOperandBundle(LLVMContext::OB_gc_live)) {
6046 Check(BaseIndex < Opt->Inputs.size(),
6047 "gc.relocate: statepoint base index out of bounds", Call);
6048 Check(DerivedIndex < Opt->Inputs.size(),
6049 "gc.relocate: statepoint derived index out of bounds", Call);
6050 }
6051
6052 // Relocated value must be either a pointer type or vector-of-pointer type,
6053 // but gc_relocate does not need to return the same pointer type as the
6054 // relocated pointer. It can be casted to the correct type later if it's
6055 // desired. However, they must have the same address space and 'vectorness'
6056 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6057 auto *ResultType = Call.getType();
6058 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6059 auto *BaseType = Relocate.getBasePtr()->getType();
6060
6061 Check(BaseType->isPtrOrPtrVectorTy(),
6062 "gc.relocate: relocated value must be a pointer", Call);
6063 Check(DerivedType->isPtrOrPtrVectorTy(),
6064 "gc.relocate: relocated value must be a pointer", Call);
6065
6066 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6067 "gc.relocate: vector relocates to vector and pointer to pointer",
6068 Call);
6069 Check(
6070 ResultType->getPointerAddressSpace() ==
6071 DerivedType->getPointerAddressSpace(),
6072 "gc.relocate: relocating a pointer shouldn't change its address space",
6073 Call);
6074
6075 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6076 Check(GC, "gc.relocate: calling function must have GCStrategy",
6077 Call.getFunction());
6078 if (GC) {
6079 auto isGCPtr = [&GC](Type *PTy) {
6080 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6081 };
6082 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6083 Check(isGCPtr(BaseType),
6084 "gc.relocate: relocated value must be a gc pointer", Call);
6085 Check(isGCPtr(DerivedType),
6086 "gc.relocate: relocated value must be a gc pointer", Call);
6087 }
6088 break;
6089 }
6090 case Intrinsic::experimental_patchpoint: {
6091 if (Call.getCallingConv() == CallingConv::AnyReg) {
6093 "patchpoint: invalid return type used with anyregcc", Call);
6094 }
6095 break;
6096 }
6097 case Intrinsic::eh_exceptioncode:
6098 case Intrinsic::eh_exceptionpointer: {
6100 "eh.exceptionpointer argument must be a catchpad", Call);
6101 break;
6102 }
6103 case Intrinsic::get_active_lane_mask: {
6105 "get_active_lane_mask: must return a "
6106 "vector",
6107 Call);
6108 auto *ElemTy = Call.getType()->getScalarType();
6109 Check(ElemTy->isIntegerTy(1),
6110 "get_active_lane_mask: element type is not "
6111 "i1",
6112 Call);
6113 break;
6114 }
6115 case Intrinsic::experimental_get_vector_length: {
6116 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6117 Check(!VF->isNegative() && !VF->isZero(),
6118 "get_vector_length: VF must be positive", Call);
6119 break;
6120 }
6121 case Intrinsic::masked_load: {
6122 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6123 Call);
6124
6125 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6127 Value *PassThru = Call.getArgOperand(3);
6128 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6129 Call);
6130 Check(Alignment->getValue().isPowerOf2(),
6131 "masked_load: alignment must be a power of 2", Call);
6132 Check(PassThru->getType() == Call.getType(),
6133 "masked_load: pass through and return type must match", Call);
6134 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6135 cast<VectorType>(Call.getType())->getElementCount(),
6136 "masked_load: vector mask must be same length as return", Call);
6137 break;
6138 }
6139 case Intrinsic::masked_store: {
6140 Value *Val = Call.getArgOperand(0);
6141 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6143 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6144 Call);
6145 Check(Alignment->getValue().isPowerOf2(),
6146 "masked_store: alignment must be a power of 2", Call);
6147 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6148 cast<VectorType>(Val->getType())->getElementCount(),
6149 "masked_store: vector mask must be same length as value", Call);
6150 break;
6151 }
6152
6153 case Intrinsic::masked_gather: {
6154 const APInt &Alignment =
6156 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6157 "masked_gather: alignment must be 0 or a power of 2", Call);
6158 break;
6159 }
6160 case Intrinsic::masked_scatter: {
6161 const APInt &Alignment =
6162 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6163 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6164 "masked_scatter: alignment must be 0 or a power of 2", Call);
6165 break;
6166 }
6167
6168 case Intrinsic::experimental_guard: {
6169 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6171 "experimental_guard must have exactly one "
6172 "\"deopt\" operand bundle");
6173 break;
6174 }
6175
6176 case Intrinsic::experimental_deoptimize: {
6177 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6178 Call);
6180 "experimental_deoptimize must have exactly one "
6181 "\"deopt\" operand bundle");
6183 "experimental_deoptimize return type must match caller return type");
6184
6185 if (isa<CallInst>(Call)) {
6187 Check(RI,
6188 "calls to experimental_deoptimize must be followed by a return");
6189
6190 if (!Call.getType()->isVoidTy() && RI)
6191 Check(RI->getReturnValue() == &Call,
6192 "calls to experimental_deoptimize must be followed by a return "
6193 "of the value computed by experimental_deoptimize");
6194 }
6195
6196 break;
6197 }
6198 case Intrinsic::vastart: {
6200 "va_start called in a non-varargs function");
6201 break;
6202 }
6203 case Intrinsic::get_dynamic_area_offset: {
6204 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6205 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6206 IntTy->getBitWidth(),
6207 "get_dynamic_area_offset result type must be scalar integer matching "
6208 "alloca address space width",
6209 Call);
6210 break;
6211 }
6212 case Intrinsic::vector_reduce_and:
6213 case Intrinsic::vector_reduce_or:
6214 case Intrinsic::vector_reduce_xor:
6215 case Intrinsic::vector_reduce_add:
6216 case Intrinsic::vector_reduce_mul:
6217 case Intrinsic::vector_reduce_smax:
6218 case Intrinsic::vector_reduce_smin:
6219 case Intrinsic::vector_reduce_umax:
6220 case Intrinsic::vector_reduce_umin: {
6221 Type *ArgTy = Call.getArgOperand(0)->getType();
6222 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6223 "Intrinsic has incorrect argument type!");
6224 break;
6225 }
6226 case Intrinsic::vector_reduce_fmax:
6227 case Intrinsic::vector_reduce_fmin: {
6228 Type *ArgTy = Call.getArgOperand(0)->getType();
6229 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6230 "Intrinsic has incorrect argument type!");
6231 break;
6232 }
6233 case Intrinsic::vector_reduce_fadd:
6234 case Intrinsic::vector_reduce_fmul: {
6235 // Unlike the other reductions, the first argument is a start value. The
6236 // second argument is the vector to be reduced.
6237 Type *ArgTy = Call.getArgOperand(1)->getType();
6238 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6239 "Intrinsic has incorrect argument type!");
6240 break;
6241 }
6242 case Intrinsic::smul_fix:
6243 case Intrinsic::smul_fix_sat:
6244 case Intrinsic::umul_fix:
6245 case Intrinsic::umul_fix_sat:
6246 case Intrinsic::sdiv_fix:
6247 case Intrinsic::sdiv_fix_sat:
6248 case Intrinsic::udiv_fix:
6249 case Intrinsic::udiv_fix_sat: {
6250 Value *Op1 = Call.getArgOperand(0);
6251 Value *Op2 = Call.getArgOperand(1);
6253 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6254 "vector of ints");
6256 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6257 "vector of ints");
6258
6259 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6260 Check(Op3->getType()->isIntegerTy(),
6261 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6262 Check(Op3->getBitWidth() <= 32,
6263 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6264
6265 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6266 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6267 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6268 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6269 "the operands");
6270 } else {
6271 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6272 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6273 "to the width of the operands");
6274 }
6275 break;
6276 }
6277 case Intrinsic::lrint:
6278 case Intrinsic::llrint:
6279 case Intrinsic::lround:
6280 case Intrinsic::llround: {
6281 Type *ValTy = Call.getArgOperand(0)->getType();
6282 Type *ResultTy = Call.getType();
6283 auto *VTy = dyn_cast<VectorType>(ValTy);
6284 auto *RTy = dyn_cast<VectorType>(ResultTy);
6285 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6286 ExpectedName + ": argument must be floating-point or vector "
6287 "of floating-points, and result must be integer or "
6288 "vector of integers",
6289 &Call);
6290 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6291 ExpectedName + ": argument and result disagree on vector use", &Call);
6292 if (VTy) {
6293 Check(VTy->getElementCount() == RTy->getElementCount(),
6294 ExpectedName + ": argument must be same length as result", &Call);
6295 }
6296 break;
6297 }
6298 case Intrinsic::bswap: {
6299 Type *Ty = Call.getType();
6300 unsigned Size = Ty->getScalarSizeInBits();
6301 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6302 break;
6303 }
6304 case Intrinsic::invariant_start: {
6305 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6306 Check(InvariantSize &&
6307 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6308 "invariant_start parameter must be -1, 0 or a positive number",
6309 &Call);
6310 break;
6311 }
6312 case Intrinsic::matrix_multiply:
6313 case Intrinsic::matrix_transpose:
6314 case Intrinsic::matrix_column_major_load:
6315 case Intrinsic::matrix_column_major_store: {
6317 ConstantInt *Stride = nullptr;
6318 ConstantInt *NumRows;
6319 ConstantInt *NumColumns;
6320 VectorType *ResultTy;
6321 Type *Op0ElemTy = nullptr;
6322 Type *Op1ElemTy = nullptr;
6323 switch (ID) {
6324 case Intrinsic::matrix_multiply: {
6325 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6326 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6327 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6329 ->getNumElements() ==
6330 NumRows->getZExtValue() * N->getZExtValue(),
6331 "First argument of a matrix operation does not match specified "
6332 "shape!");
6334 ->getNumElements() ==
6335 N->getZExtValue() * NumColumns->getZExtValue(),
6336 "Second argument of a matrix operation does not match specified "
6337 "shape!");
6338
6339 ResultTy = cast<VectorType>(Call.getType());
6340 Op0ElemTy =
6341 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6342 Op1ElemTy =
6343 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6344 break;
6345 }
6346 case Intrinsic::matrix_transpose:
6347 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6348 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6349 ResultTy = cast<VectorType>(Call.getType());
6350 Op0ElemTy =
6351 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6352 break;
6353 case Intrinsic::matrix_column_major_load: {
6355 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6356 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6357 ResultTy = cast<VectorType>(Call.getType());
6358 break;
6359 }
6360 case Intrinsic::matrix_column_major_store: {
6362 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6363 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6364 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6365 Op0ElemTy =
6366 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6367 break;
6368 }
6369 default:
6370 llvm_unreachable("unexpected intrinsic");
6371 }
6372
6373 Check(ResultTy->getElementType()->isIntegerTy() ||
6374 ResultTy->getElementType()->isFloatingPointTy(),
6375 "Result type must be an integer or floating-point type!", IF);
6376
6377 if (Op0ElemTy)
6378 Check(ResultTy->getElementType() == Op0ElemTy,
6379 "Vector element type mismatch of the result and first operand "
6380 "vector!",
6381 IF);
6382
6383 if (Op1ElemTy)
6384 Check(ResultTy->getElementType() == Op1ElemTy,
6385 "Vector element type mismatch of the result and second operand "
6386 "vector!",
6387 IF);
6388
6390 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6391 "Result of a matrix operation does not fit in the returned vector!");
6392
6393 if (Stride)
6394 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6395 "Stride must be greater or equal than the number of rows!", IF);
6396
6397 break;
6398 }
6399 case Intrinsic::vector_splice: {
6401 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6402 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6403 if (Call.getParent() && Call.getParent()->getParent()) {
6404 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6405 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6406 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6407 }
6408 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6409 (Idx >= 0 && Idx < KnownMinNumElements),
6410 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6411 "known minimum number of elements in the vector. For scalable "
6412 "vectors the minimum number of elements is determined from "
6413 "vscale_range.",
6414 &Call);
6415 break;
6416 }
6417 case Intrinsic::stepvector: {
6419 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6420 VecTy->getScalarSizeInBits() >= 8,
6421 "stepvector only supported for vectors of integers "
6422 "with a bitwidth of at least 8.",
6423 &Call);
6424 break;
6425 }
6426 case Intrinsic::experimental_vector_match: {
6427 Value *Op1 = Call.getArgOperand(0);
6428 Value *Op2 = Call.getArgOperand(1);
6430
6431 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6432 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6433 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6434
6435 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6437 "Second operand must be a fixed length vector.", &Call);
6438 Check(Op1Ty->getElementType()->isIntegerTy(),
6439 "First operand must be a vector of integers.", &Call);
6440 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6441 "First two operands must have the same element type.", &Call);
6442 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6443 "First operand and mask must have the same number of elements.",
6444 &Call);
6445 Check(MaskTy->getElementType()->isIntegerTy(1),
6446 "Mask must be a vector of i1's.", &Call);
6447 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6448 &Call);
6449 break;
6450 }
6451 case Intrinsic::vector_insert: {
6452 Value *Vec = Call.getArgOperand(0);
6453 Value *SubVec = Call.getArgOperand(1);
6454 Value *Idx = Call.getArgOperand(2);
6455 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6456
6457 VectorType *VecTy = cast<VectorType>(Vec->getType());
6458 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6459
6460 ElementCount VecEC = VecTy->getElementCount();
6461 ElementCount SubVecEC = SubVecTy->getElementCount();
6462 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6463 "vector_insert parameters must have the same element "
6464 "type.",
6465 &Call);
6466 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6467 "vector_insert index must be a constant multiple of "
6468 "the subvector's known minimum vector length.");
6469
6470 // If this insertion is not the 'mixed' case where a fixed vector is
6471 // inserted into a scalable vector, ensure that the insertion of the
6472 // subvector does not overrun the parent vector.
6473 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6474 Check(IdxN < VecEC.getKnownMinValue() &&
6475 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6476 "subvector operand of vector_insert would overrun the "
6477 "vector being inserted into.");
6478 }
6479 break;
6480 }
6481 case Intrinsic::vector_extract: {
6482 Value *Vec = Call.getArgOperand(0);
6483 Value *Idx = Call.getArgOperand(1);
6484 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6485
6486 VectorType *ResultTy = cast<VectorType>(Call.getType());
6487 VectorType *VecTy = cast<VectorType>(Vec->getType());
6488
6489 ElementCount VecEC = VecTy->getElementCount();
6490 ElementCount ResultEC = ResultTy->getElementCount();
6491
6492 Check(ResultTy->getElementType() == VecTy->getElementType(),
6493 "vector_extract result must have the same element "
6494 "type as the input vector.",
6495 &Call);
6496 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6497 "vector_extract index must be a constant multiple of "
6498 "the result type's known minimum vector length.");
6499
6500 // If this extraction is not the 'mixed' case where a fixed vector is
6501 // extracted from a scalable vector, ensure that the extraction does not
6502 // overrun the parent vector.
6503 if (VecEC.isScalable() == ResultEC.isScalable()) {
6504 Check(IdxN < VecEC.getKnownMinValue() &&
6505 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6506 "vector_extract would overrun.");
6507 }
6508 break;
6509 }
6510 case Intrinsic::experimental_vector_partial_reduce_add: {
6513
6514 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6515 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6516
6517 Check((VecWidth % AccWidth) == 0,
6518 "Invalid vector widths for partial "
6519 "reduction. The width of the input vector "
6520 "must be a positive integer multiple of "
6521 "the width of the accumulator vector.");
6522 break;
6523 }
6524 case Intrinsic::experimental_noalias_scope_decl: {
6525 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6526 break;
6527 }
6528 case Intrinsic::preserve_array_access_index:
6529 case Intrinsic::preserve_struct_access_index:
6530 case Intrinsic::aarch64_ldaxr:
6531 case Intrinsic::aarch64_ldxr:
6532 case Intrinsic::arm_ldaex:
6533 case Intrinsic::arm_ldrex: {
6534 Type *ElemTy = Call.getParamElementType(0);
6535 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6536 &Call);
6537 break;
6538 }
6539 case Intrinsic::aarch64_stlxr:
6540 case Intrinsic::aarch64_stxr:
6541 case Intrinsic::arm_stlex:
6542 case Intrinsic::arm_strex: {
6543 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6544 Check(ElemTy,
6545 "Intrinsic requires elementtype attribute on second argument.",
6546 &Call);
6547 break;
6548 }
6549 case Intrinsic::aarch64_prefetch: {
6550 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6551 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6552 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6553 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6554 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6555 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6556 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6557 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6558 break;
6559 }
6560 case Intrinsic::callbr_landingpad: {
6561 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6562 Check(CBR, "intrinstic requires callbr operand", &Call);
6563 if (!CBR)
6564 break;
6565
6566 const BasicBlock *LandingPadBB = Call.getParent();
6567 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6568 if (!PredBB) {
6569 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6570 break;
6571 }
6572 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6573 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6574 &Call);
6575 break;
6576 }
6577 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6578 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6579 "block in indirect destination list",
6580 &Call);
6581 const Instruction &First = *LandingPadBB->begin();
6582 Check(&First == &Call, "No other instructions may proceed intrinsic",
6583 &Call);
6584 break;
6585 }
6586 case Intrinsic::amdgcn_cs_chain: {
6587 auto CallerCC = Call.getCaller()->getCallingConv();
6588 switch (CallerCC) {
6589 case CallingConv::AMDGPU_CS:
6590 case CallingConv::AMDGPU_CS_Chain:
6591 case CallingConv::AMDGPU_CS_ChainPreserve:
6592 break;
6593 default:
6594 CheckFailed("Intrinsic can only be used from functions with the "
6595 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6596 "calling conventions",
6597 &Call);
6598 break;
6599 }
6600
6601 Check(Call.paramHasAttr(2, Attribute::InReg),
6602 "SGPR arguments must have the `inreg` attribute", &Call);
6603 Check(!Call.paramHasAttr(3, Attribute::InReg),
6604 "VGPR arguments must not have the `inreg` attribute", &Call);
6605
6606 auto *Next = Call.getNextNode();
6607 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6608 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6609 Intrinsic::amdgcn_unreachable;
6610 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6611 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6612 break;
6613 }
6614 case Intrinsic::amdgcn_init_exec_from_input: {
6615 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6616 Check(Arg && Arg->hasInRegAttr(),
6617 "only inreg arguments to the parent function are valid as inputs to "
6618 "this intrinsic",
6619 &Call);
6620 break;
6621 }
6622 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6623 auto CallerCC = Call.getCaller()->getCallingConv();
6624 switch (CallerCC) {
6625 case CallingConv::AMDGPU_CS_Chain:
6626 case CallingConv::AMDGPU_CS_ChainPreserve:
6627 break;
6628 default:
6629 CheckFailed("Intrinsic can only be used from functions with the "
6630 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6631 "calling conventions",
6632 &Call);
6633 break;
6634 }
6635
6636 unsigned InactiveIdx = 1;
6637 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6638 "Value for inactive lanes must not have the `inreg` attribute",
6639 &Call);
6640 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6641 "Value for inactive lanes must be a function argument", &Call);
6642 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6643 "Value for inactive lanes must be a VGPR function argument", &Call);
6644 break;
6645 }
6646 case Intrinsic::amdgcn_call_whole_wave: {
6648 Check(F, "Indirect whole wave calls are not allowed", &Call);
6649
6650 CallingConv::ID CC = F->getCallingConv();
6651 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6652 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6653 &Call);
6654
6655 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6656
6657 Check(Call.arg_size() == F->arg_size(),
6658 "Call argument count must match callee argument count", &Call);
6659
6660 // The first argument of the call is the callee, and the first argument of
6661 // the callee is the active mask. The rest of the arguments must match.
6662 Check(F->arg_begin()->getType()->isIntegerTy(1),
6663 "Callee must have i1 as its first argument", &Call);
6664 for (auto [CallArg, FuncArg] :
6665 drop_begin(zip_equal(Call.args(), F->args()))) {
6666 Check(CallArg->getType() == FuncArg.getType(),
6667 "Argument types must match", &Call);
6668
6669 // Check that inreg attributes match between call site and function
6670 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6671 FuncArg.hasInRegAttr(),
6672 "Argument inreg attributes must match", &Call);
6673 }
6674 break;
6675 }
6676 case Intrinsic::amdgcn_s_prefetch_data: {
6677 Check(
6680 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6681 break;
6682 }
6683 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6684 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6685 Value *Src0 = Call.getArgOperand(0);
6686 Value *Src1 = Call.getArgOperand(1);
6687
6688 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6689 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6690 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6691 Call.getArgOperand(3));
6692 Check(BLGP <= 4, "invalid value for blgp format", Call,
6693 Call.getArgOperand(4));
6694
6695 // AMDGPU::MFMAScaleFormats values
6696 auto getFormatNumRegs = [](unsigned FormatVal) {
6697 switch (FormatVal) {
6698 case 0:
6699 case 1:
6700 return 8u;
6701 case 2:
6702 case 3:
6703 return 6u;
6704 case 4:
6705 return 4u;
6706 default:
6707 llvm_unreachable("invalid format value");
6708 }
6709 };
6710
6711 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6712 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6713 return false;
6714 unsigned NumElts = Ty->getNumElements();
6715 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6716 };
6717
6718 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6719 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6720 Check(isValidSrcASrcBVector(Src0Ty),
6721 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6722 Check(isValidSrcASrcBVector(Src1Ty),
6723 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6724
6725 // Permit excess registers for the format.
6726 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6727 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6728 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6729 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6730 break;
6731 }
6732 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6733 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6734 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6735 Value *Src0 = Call.getArgOperand(1);
6736 Value *Src1 = Call.getArgOperand(3);
6737
6738 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6739 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6740 Check(FmtA <= 4, "invalid value for matrix format", Call,
6741 Call.getArgOperand(0));
6742 Check(FmtB <= 4, "invalid value for matrix format", Call,
6743 Call.getArgOperand(2));
6744
6745 // AMDGPU::MatrixFMT values
6746 auto getFormatNumRegs = [](unsigned FormatVal) {
6747 switch (FormatVal) {
6748 case 0:
6749 case 1:
6750 return 16u;
6751 case 2:
6752 case 3:
6753 return 12u;
6754 case 4:
6755 return 8u;
6756 default:
6757 llvm_unreachable("invalid format value");
6758 }
6759 };
6760
6761 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6762 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6763 return false;
6764 unsigned NumElts = Ty->getNumElements();
6765 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6766 };
6767
6768 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6769 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6770 Check(isValidSrcASrcBVector(Src0Ty),
6771 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6772 Check(isValidSrcASrcBVector(Src1Ty),
6773 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6774
6775 // Permit excess registers for the format.
6776 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6777 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6778 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6779 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6780 break;
6781 }
6782 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6783 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6784 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6785 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6786 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6787 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6788 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6789 Value *PtrArg = Call.getArgOperand(0);
6790 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6792 "cooperative atomic intrinsics require a generic or global pointer",
6793 &Call, PtrArg);
6794
6795 // Last argument must be a MD string
6797 MDNode *MD = cast<MDNode>(Op->getMetadata());
6798 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6799 "cooperative atomic intrinsics require that the last argument is a "
6800 "metadata string",
6801 &Call, Op);
6802 break;
6803 }
6804 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6805 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6806 Value *V = Call.getArgOperand(0);
6807 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6808 Check(RegCount % 8 == 0,
6809 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6810 break;
6811 }
6812 case Intrinsic::experimental_convergence_entry:
6813 case Intrinsic::experimental_convergence_anchor:
6814 break;
6815 case Intrinsic::experimental_convergence_loop:
6816 break;
6817 case Intrinsic::ptrmask: {
6818 Type *Ty0 = Call.getArgOperand(0)->getType();
6819 Type *Ty1 = Call.getArgOperand(1)->getType();
6821 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6822 "of pointers",
6823 &Call);
6824 Check(
6825 Ty0->isVectorTy() == Ty1->isVectorTy(),
6826 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6827 &Call);
6828 if (Ty0->isVectorTy())
6829 Check(cast<VectorType>(Ty0)->getElementCount() ==
6830 cast<VectorType>(Ty1)->getElementCount(),
6831 "llvm.ptrmask intrinsic arguments must have the same number of "
6832 "elements",
6833 &Call);
6834 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6835 "llvm.ptrmask intrinsic second argument bitwidth must match "
6836 "pointer index type size of first argument",
6837 &Call);
6838 break;
6839 }
6840 case Intrinsic::thread_pointer: {
6842 DL.getDefaultGlobalsAddressSpace(),
6843 "llvm.thread.pointer intrinsic return type must be for the globals "
6844 "address space",
6845 &Call);
6846 break;
6847 }
6848 case Intrinsic::threadlocal_address: {
6849 const Value &Arg0 = *Call.getArgOperand(0);
6850 Check(isa<GlobalValue>(Arg0),
6851 "llvm.threadlocal.address first argument must be a GlobalValue");
6852 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6853 "llvm.threadlocal.address operand isThreadLocal() must be true");
6854 break;
6855 }
6856 case Intrinsic::lifetime_start:
6857 case Intrinsic::lifetime_end: {
6860 "llvm.lifetime.start/end can only be used on alloca or poison",
6861 &Call);
6862 break;
6863 }
6864 };
6865
6866 // Verify that there aren't any unmediated control transfers between funclets.
6868 Function *F = Call.getParent()->getParent();
6869 if (F->hasPersonalityFn() &&
6870 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6871 // Run EH funclet coloring on-demand and cache results for other intrinsic
6872 // calls in this function
6873 if (BlockEHFuncletColors.empty())
6874 BlockEHFuncletColors = colorEHFunclets(*F);
6875
6876 // Check for catch-/cleanup-pad in first funclet block
6877 bool InEHFunclet = false;
6878 BasicBlock *CallBB = Call.getParent();
6879 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6880 assert(CV.size() > 0 && "Uncolored block");
6881 for (BasicBlock *ColorFirstBB : CV)
6882 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6883 It != ColorFirstBB->end())
6885 InEHFunclet = true;
6886
6887 // Check for funclet operand bundle
6888 bool HasToken = false;
6889 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6891 HasToken = true;
6892
6893 // This would cause silent code truncation in WinEHPrepare
6894 if (InEHFunclet)
6895 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6896 }
6897 }
6898}
6899
6900/// Carefully grab the subprogram from a local scope.
6901///
6902/// This carefully grabs the subprogram from a local scope, avoiding the
6903/// built-in assertions that would typically fire.
6905 if (!LocalScope)
6906 return nullptr;
6907
6908 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6909 return SP;
6910
6911 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6912 return getSubprogram(LB->getRawScope());
6913
6914 // Just return null; broken scope chains are checked elsewhere.
6915 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6916 return nullptr;
6917}
6918
6919void Verifier::visit(DbgLabelRecord &DLR) {
6921 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6922
6923 // Ignore broken !dbg attachments; they're checked elsewhere.
6924 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6925 if (!isa<DILocation>(N))
6926 return;
6927
6928 BasicBlock *BB = DLR.getParent();
6929 Function *F = BB ? BB->getParent() : nullptr;
6930
6931 // The scopes for variables and !dbg attachments must agree.
6932 DILabel *Label = DLR.getLabel();
6933 DILocation *Loc = DLR.getDebugLoc();
6934 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6935
6936 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6937 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6938 if (!LabelSP || !LocSP)
6939 return;
6940
6941 CheckDI(LabelSP == LocSP,
6942 "mismatched subprogram between #dbg_label label and !dbg attachment",
6943 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6944 Loc->getScope()->getSubprogram());
6945}
6946
6947void Verifier::visit(DbgVariableRecord &DVR) {
6948 BasicBlock *BB = DVR.getParent();
6949 Function *F = BB->getParent();
6950
6951 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
6952 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
6953 DVR.getType() == DbgVariableRecord::LocationType::Assign,
6954 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
6955
6956 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6957 // DIArgList, or an empty MDNode (which is a legacy representation for an
6958 // "undef" location).
6959 auto *MD = DVR.getRawLocation();
6960 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6961 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6962 "invalid #dbg record address/value", &DVR, MD, BB, F);
6963 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
6964 visitValueAsMetadata(*VAM, F);
6965 if (DVR.isDbgDeclare()) {
6966 // Allow integers here to support inttoptr salvage.
6967 Type *Ty = VAM->getValue()->getType();
6968 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
6969 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
6970 F);
6971 }
6972 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
6973 visitDIArgList(*AL, F);
6974 }
6975
6977 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
6978 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6979
6981 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
6982 F);
6983 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6984
6985 if (DVR.isDbgAssign()) {
6987 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
6988 F);
6989 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6990 AreDebugLocsAllowed::No);
6991
6992 const auto *RawAddr = DVR.getRawAddress();
6993 // Similarly to the location above, the address for an assign
6994 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6995 // represents an undef address.
6996 CheckDI(
6997 isa<ValueAsMetadata>(RawAddr) ||
6998 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6999 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7000 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7001 visitValueAsMetadata(*VAM, F);
7002
7004 "invalid #dbg_assign address expression", &DVR,
7005 DVR.getRawAddressExpression(), BB, F);
7006 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7007
7008 // All of the linked instructions should be in the same function as DVR.
7009 for (Instruction *I : at::getAssignmentInsts(&DVR))
7010 CheckDI(DVR.getFunction() == I->getFunction(),
7011 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7012 }
7013
7014 // This check is redundant with one in visitLocalVariable().
7015 DILocalVariable *Var = DVR.getVariable();
7016 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7017 BB, F);
7018
7019 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7020 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7021 &DVR, DLNode, BB, F);
7022 DILocation *Loc = DVR.getDebugLoc();
7023
7024 // The scopes for variables and !dbg attachments must agree.
7025 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7026 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7027 if (!VarSP || !LocSP)
7028 return; // Broken scope chains are checked elsewhere.
7029
7030 CheckDI(VarSP == LocSP,
7031 "mismatched subprogram between #dbg record variable and DILocation",
7032 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7033 Loc->getScope()->getSubprogram(), BB, F);
7034
7035 verifyFnArgs(DVR);
7036}
7037
7038void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7039 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7040 auto *RetTy = cast<VectorType>(VPCast->getType());
7041 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7042 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7043 "VP cast intrinsic first argument and result vector lengths must be "
7044 "equal",
7045 *VPCast);
7046
7047 switch (VPCast->getIntrinsicID()) {
7048 default:
7049 llvm_unreachable("Unknown VP cast intrinsic");
7050 case Intrinsic::vp_trunc:
7051 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7052 "llvm.vp.trunc intrinsic first argument and result element type "
7053 "must be integer",
7054 *VPCast);
7055 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7056 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7057 "larger than the bit size of the return type",
7058 *VPCast);
7059 break;
7060 case Intrinsic::vp_zext:
7061 case Intrinsic::vp_sext:
7062 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7063 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7064 "element type must be integer",
7065 *VPCast);
7066 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7067 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7068 "argument must be smaller than the bit size of the return type",
7069 *VPCast);
7070 break;
7071 case Intrinsic::vp_fptoui:
7072 case Intrinsic::vp_fptosi:
7073 case Intrinsic::vp_lrint:
7074 case Intrinsic::vp_llrint:
7075 Check(
7076 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7077 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7078 "type must be floating-point and result element type must be integer",
7079 *VPCast);
7080 break;
7081 case Intrinsic::vp_uitofp:
7082 case Intrinsic::vp_sitofp:
7083 Check(
7084 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7085 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7086 "type must be integer and result element type must be floating-point",
7087 *VPCast);
7088 break;
7089 case Intrinsic::vp_fptrunc:
7090 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7091 "llvm.vp.fptrunc intrinsic first argument and result element type "
7092 "must be floating-point",
7093 *VPCast);
7094 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7095 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7096 "larger than the bit size of the return type",
7097 *VPCast);
7098 break;
7099 case Intrinsic::vp_fpext:
7100 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7101 "llvm.vp.fpext intrinsic first argument and result element type "
7102 "must be floating-point",
7103 *VPCast);
7104 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7105 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7106 "smaller than the bit size of the return type",
7107 *VPCast);
7108 break;
7109 case Intrinsic::vp_ptrtoint:
7110 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7111 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7112 "pointer and result element type must be integer",
7113 *VPCast);
7114 break;
7115 case Intrinsic::vp_inttoptr:
7116 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7117 "llvm.vp.inttoptr intrinsic first argument element type must be "
7118 "integer and result element type must be pointer",
7119 *VPCast);
7120 break;
7121 }
7122 }
7123
7124 switch (VPI.getIntrinsicID()) {
7125 case Intrinsic::vp_fcmp: {
7126 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7128 "invalid predicate for VP FP comparison intrinsic", &VPI);
7129 break;
7130 }
7131 case Intrinsic::vp_icmp: {
7132 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7134 "invalid predicate for VP integer comparison intrinsic", &VPI);
7135 break;
7136 }
7137 case Intrinsic::vp_is_fpclass: {
7138 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7139 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7140 "unsupported bits for llvm.vp.is.fpclass test mask");
7141 break;
7142 }
7143 case Intrinsic::experimental_vp_splice: {
7144 VectorType *VecTy = cast<VectorType>(VPI.getType());
7145 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7146 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7147 if (VPI.getParent() && VPI.getParent()->getParent()) {
7148 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7149 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7150 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7151 }
7152 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7153 (Idx >= 0 && Idx < KnownMinNumElements),
7154 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7155 "known minimum number of elements in the vector. For scalable "
7156 "vectors the minimum number of elements is determined from "
7157 "vscale_range.",
7158 &VPI);
7159 break;
7160 }
7161 }
7162}
7163
7164void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7165 unsigned NumOperands = FPI.getNonMetadataArgCount();
7166 bool HasRoundingMD =
7168
7169 // Add the expected number of metadata operands.
7170 NumOperands += (1 + HasRoundingMD);
7171
7172 // Compare intrinsics carry an extra predicate metadata operand.
7174 NumOperands += 1;
7175 Check((FPI.arg_size() == NumOperands),
7176 "invalid arguments for constrained FP intrinsic", &FPI);
7177
7178 switch (FPI.getIntrinsicID()) {
7179 case Intrinsic::experimental_constrained_lrint:
7180 case Intrinsic::experimental_constrained_llrint: {
7181 Type *ValTy = FPI.getArgOperand(0)->getType();
7182 Type *ResultTy = FPI.getType();
7183 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7184 "Intrinsic does not support vectors", &FPI);
7185 break;
7186 }
7187
7188 case Intrinsic::experimental_constrained_lround:
7189 case Intrinsic::experimental_constrained_llround: {
7190 Type *ValTy = FPI.getArgOperand(0)->getType();
7191 Type *ResultTy = FPI.getType();
7192 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7193 "Intrinsic does not support vectors", &FPI);
7194 break;
7195 }
7196
7197 case Intrinsic::experimental_constrained_fcmp:
7198 case Intrinsic::experimental_constrained_fcmps: {
7199 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7201 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7202 break;
7203 }
7204
7205 case Intrinsic::experimental_constrained_fptosi:
7206 case Intrinsic::experimental_constrained_fptoui: {
7207 Value *Operand = FPI.getArgOperand(0);
7208 ElementCount SrcEC;
7209 Check(Operand->getType()->isFPOrFPVectorTy(),
7210 "Intrinsic first argument must be floating point", &FPI);
7211 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7212 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7213 }
7214
7215 Operand = &FPI;
7216 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7217 "Intrinsic first argument and result disagree on vector use", &FPI);
7218 Check(Operand->getType()->isIntOrIntVectorTy(),
7219 "Intrinsic result must be an integer", &FPI);
7220 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7221 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7222 "Intrinsic first argument and result vector lengths must be equal",
7223 &FPI);
7224 }
7225 break;
7226 }
7227
7228 case Intrinsic::experimental_constrained_sitofp:
7229 case Intrinsic::experimental_constrained_uitofp: {
7230 Value *Operand = FPI.getArgOperand(0);
7231 ElementCount SrcEC;
7232 Check(Operand->getType()->isIntOrIntVectorTy(),
7233 "Intrinsic first argument must be integer", &FPI);
7234 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7235 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7236 }
7237
7238 Operand = &FPI;
7239 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7240 "Intrinsic first argument and result disagree on vector use", &FPI);
7241 Check(Operand->getType()->isFPOrFPVectorTy(),
7242 "Intrinsic result must be a floating point", &FPI);
7243 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7244 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7245 "Intrinsic first argument and result vector lengths must be equal",
7246 &FPI);
7247 }
7248 break;
7249 }
7250
7251 case Intrinsic::experimental_constrained_fptrunc:
7252 case Intrinsic::experimental_constrained_fpext: {
7253 Value *Operand = FPI.getArgOperand(0);
7254 Type *OperandTy = Operand->getType();
7255 Value *Result = &FPI;
7256 Type *ResultTy = Result->getType();
7257 Check(OperandTy->isFPOrFPVectorTy(),
7258 "Intrinsic first argument must be FP or FP vector", &FPI);
7259 Check(ResultTy->isFPOrFPVectorTy(),
7260 "Intrinsic result must be FP or FP vector", &FPI);
7261 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7262 "Intrinsic first argument and result disagree on vector use", &FPI);
7263 if (OperandTy->isVectorTy()) {
7264 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7265 cast<VectorType>(ResultTy)->getElementCount(),
7266 "Intrinsic first argument and result vector lengths must be equal",
7267 &FPI);
7268 }
7269 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7270 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7271 "Intrinsic first argument's type must be larger than result type",
7272 &FPI);
7273 } else {
7274 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7275 "Intrinsic first argument's type must be smaller than result type",
7276 &FPI);
7277 }
7278 break;
7279 }
7280
7281 default:
7282 break;
7283 }
7284
7285 // If a non-metadata argument is passed in a metadata slot then the
7286 // error will be caught earlier when the incorrect argument doesn't
7287 // match the specification in the intrinsic call table. Thus, no
7288 // argument type check is needed here.
7289
7290 Check(FPI.getExceptionBehavior().has_value(),
7291 "invalid exception behavior argument", &FPI);
7292 if (HasRoundingMD) {
7293 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7294 &FPI);
7295 }
7296}
7297
7298void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7299 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7300 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7301
7302 // We don't know whether this intrinsic verified correctly.
7303 if (!V || !E || !E->isValid())
7304 return;
7305
7306 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7307 auto Fragment = E->getFragmentInfo();
7308 if (!Fragment)
7309 return;
7310
7311 // The frontend helps out GDB by emitting the members of local anonymous
7312 // unions as artificial local variables with shared storage. When SROA splits
7313 // the storage for artificial local variables that are smaller than the entire
7314 // union, the overhang piece will be outside of the allotted space for the
7315 // variable and this check fails.
7316 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7317 if (V->isArtificial())
7318 return;
7319
7320 verifyFragmentExpression(*V, *Fragment, &DVR);
7321}
7322
7323template <typename ValueOrMetadata>
7324void Verifier::verifyFragmentExpression(const DIVariable &V,
7326 ValueOrMetadata *Desc) {
7327 // If there's no size, the type is broken, but that should be checked
7328 // elsewhere.
7329 auto VarSize = V.getSizeInBits();
7330 if (!VarSize)
7331 return;
7332
7333 unsigned FragSize = Fragment.SizeInBits;
7334 unsigned FragOffset = Fragment.OffsetInBits;
7335 CheckDI(FragSize + FragOffset <= *VarSize,
7336 "fragment is larger than or outside of variable", Desc, &V);
7337 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7338}
7339
7340void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7341 // This function does not take the scope of noninlined function arguments into
7342 // account. Don't run it if current function is nodebug, because it may
7343 // contain inlined debug intrinsics.
7344 if (!HasDebugInfo)
7345 return;
7346
7347 // For performance reasons only check non-inlined ones.
7348 if (DVR.getDebugLoc()->getInlinedAt())
7349 return;
7350
7351 DILocalVariable *Var = DVR.getVariable();
7352 CheckDI(Var, "#dbg record without variable");
7353
7354 unsigned ArgNo = Var->getArg();
7355 if (!ArgNo)
7356 return;
7357
7358 // Verify there are no duplicate function argument debug info entries.
7359 // These will cause hard-to-debug assertions in the DWARF backend.
7360 if (DebugFnArgs.size() < ArgNo)
7361 DebugFnArgs.resize(ArgNo, nullptr);
7362
7363 auto *Prev = DebugFnArgs[ArgNo - 1];
7364 DebugFnArgs[ArgNo - 1] = Var;
7365 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7366 Prev, Var);
7367}
7368
7369void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7370 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7371
7372 // We don't know whether this intrinsic verified correctly.
7373 if (!E || !E->isValid())
7374 return;
7375
7377 Value *VarValue = DVR.getVariableLocationOp(0);
7378 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7379 return;
7380 // We allow EntryValues for swift async arguments, as they have an
7381 // ABI-guarantee to be turned into a specific register.
7382 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7383 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7384 return;
7385 }
7386
7387 CheckDI(!E->isEntryValue(),
7388 "Entry values are only allowed in MIR unless they target a "
7389 "swiftasync Argument",
7390 &DVR);
7391}
7392
7393void Verifier::verifyCompileUnits() {
7394 // When more than one Module is imported into the same context, such as during
7395 // an LTO build before linking the modules, ODR type uniquing may cause types
7396 // to point to a different CU. This check does not make sense in this case.
7397 if (M.getContext().isODRUniquingDebugTypes())
7398 return;
7399 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7400 SmallPtrSet<const Metadata *, 2> Listed;
7401 if (CUs)
7402 Listed.insert_range(CUs->operands());
7403 for (const auto *CU : CUVisited)
7404 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7405 CUVisited.clear();
7406}
7407
7408void Verifier::verifyDeoptimizeCallingConvs() {
7409 if (DeoptimizeDeclarations.empty())
7410 return;
7411
7412 const Function *First = DeoptimizeDeclarations[0];
7413 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7414 Check(First->getCallingConv() == F->getCallingConv(),
7415 "All llvm.experimental.deoptimize declarations must have the same "
7416 "calling convention",
7417 First, F);
7418 }
7419}
7420
7421void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7422 const OperandBundleUse &BU) {
7423 FunctionType *FTy = Call.getFunctionType();
7424
7425 Check((FTy->getReturnType()->isPointerTy() ||
7426 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7427 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7428 "function returning a pointer or a non-returning function that has a "
7429 "void return type",
7430 Call);
7431
7432 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7433 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7434 "an argument",
7435 Call);
7436
7437 auto *Fn = cast<Function>(BU.Inputs.front());
7438 Intrinsic::ID IID = Fn->getIntrinsicID();
7439
7440 if (IID) {
7441 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7442 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7443 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7444 "invalid function argument", Call);
7445 } else {
7446 StringRef FnName = Fn->getName();
7447 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7448 FnName == "objc_claimAutoreleasedReturnValue" ||
7449 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7450 "invalid function argument", Call);
7451 }
7452}
7453
7454void Verifier::verifyNoAliasScopeDecl() {
7455 if (NoAliasScopeDecls.empty())
7456 return;
7457
7458 // only a single scope must be declared at a time.
7459 for (auto *II : NoAliasScopeDecls) {
7460 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7461 "Not a llvm.experimental.noalias.scope.decl ?");
7462 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7464 Check(ScopeListMV != nullptr,
7465 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7466 "argument",
7467 II);
7468
7469 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7470 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7471 Check(ScopeListMD->getNumOperands() == 1,
7472 "!id.scope.list must point to a list with a single scope", II);
7473 visitAliasScopeListMetadata(ScopeListMD);
7474 }
7475
7476 // Only check the domination rule when requested. Once all passes have been
7477 // adapted this option can go away.
7479 return;
7480
7481 // Now sort the intrinsics based on the scope MDNode so that declarations of
7482 // the same scopes are next to each other.
7483 auto GetScope = [](IntrinsicInst *II) {
7484 const auto *ScopeListMV = cast<MetadataAsValue>(
7486 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7487 };
7488
7489 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7490 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7491 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7492 return GetScope(Lhs) < GetScope(Rhs);
7493 };
7494
7495 llvm::sort(NoAliasScopeDecls, Compare);
7496
7497 // Go over the intrinsics and check that for the same scope, they are not
7498 // dominating each other.
7499 auto ItCurrent = NoAliasScopeDecls.begin();
7500 while (ItCurrent != NoAliasScopeDecls.end()) {
7501 auto CurScope = GetScope(*ItCurrent);
7502 auto ItNext = ItCurrent;
7503 do {
7504 ++ItNext;
7505 } while (ItNext != NoAliasScopeDecls.end() &&
7506 GetScope(*ItNext) == CurScope);
7507
7508 // [ItCurrent, ItNext) represents the declarations for the same scope.
7509 // Ensure they are not dominating each other.. but only if it is not too
7510 // expensive.
7511 if (ItNext - ItCurrent < 32)
7512 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7513 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7514 if (I != J)
7515 Check(!DT.dominates(I, J),
7516 "llvm.experimental.noalias.scope.decl dominates another one "
7517 "with the same scope",
7518 I);
7519 ItCurrent = ItNext;
7520 }
7521}
7522
7523//===----------------------------------------------------------------------===//
7524// Implement the public interfaces to this file...
7525//===----------------------------------------------------------------------===//
7526
7528 Function &F = const_cast<Function &>(f);
7529
7530 // Don't use a raw_null_ostream. Printing IR is expensive.
7531 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7532
7533 // Note that this function's return value is inverted from what you would
7534 // expect of a function called "verify".
7535 return !V.verify(F);
7536}
7537
7539 bool *BrokenDebugInfo) {
7540 // Don't use a raw_null_ostream. Printing IR is expensive.
7541 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7542
7543 bool Broken = false;
7544 for (const Function &F : M)
7545 Broken |= !V.verify(F);
7546
7547 Broken |= !V.verify();
7548 if (BrokenDebugInfo)
7549 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7550 // Note that this function's return value is inverted from what you would
7551 // expect of a function called "verify".
7552 return Broken;
7553}
7554
7555namespace {
7556
7557struct VerifierLegacyPass : public FunctionPass {
7558 static char ID;
7559
7560 std::unique_ptr<Verifier> V;
7561 bool FatalErrors = true;
7562
7563 VerifierLegacyPass() : FunctionPass(ID) {
7565 }
7566 explicit VerifierLegacyPass(bool FatalErrors)
7567 : FunctionPass(ID),
7568 FatalErrors(FatalErrors) {
7570 }
7571
7572 bool doInitialization(Module &M) override {
7573 V = std::make_unique<Verifier>(
7574 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7575 return false;
7576 }
7577
7578 bool runOnFunction(Function &F) override {
7579 if (!V->verify(F) && FatalErrors) {
7580 errs() << "in function " << F.getName() << '\n';
7581 report_fatal_error("Broken function found, compilation aborted!");
7582 }
7583 return false;
7584 }
7585
7586 bool doFinalization(Module &M) override {
7587 bool HasErrors = false;
7588 for (Function &F : M)
7589 if (F.isDeclaration())
7590 HasErrors |= !V->verify(F);
7591
7592 HasErrors |= !V->verify();
7593 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7594 report_fatal_error("Broken module found, compilation aborted!");
7595 return false;
7596 }
7597
7598 void getAnalysisUsage(AnalysisUsage &AU) const override {
7599 AU.setPreservesAll();
7600 }
7601};
7602
7603} // end anonymous namespace
7604
7605/// Helper to issue failure from the TBAA verification
7606template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7607 if (Diagnostic)
7608 return Diagnostic->CheckFailed(Args...);
7609}
7610
/// Report a TBAA verification failure via CheckFailed and bail out of the
/// enclosing (bool-returning) function when condition \p C does not hold.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7618
7619/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7620/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7621/// struct-type node describing an aggregate data structure (like a struct).
7622TBAAVerifier::TBAABaseNodeSummary
7623TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7624 bool IsNewFormat) {
7625 if (BaseNode->getNumOperands() < 2) {
7626 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7627 return {true, ~0u};
7628 }
7629
7630 auto Itr = TBAABaseNodes.find(BaseNode);
7631 if (Itr != TBAABaseNodes.end())
7632 return Itr->second;
7633
7634 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7635 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7636 (void)InsertResult;
7637 assert(InsertResult.second && "We just checked!");
7638 return Result;
7639}
7640
// Uncached worker for verifyTBAABaseNode: structurally validates a TBAA
// "base type" node (either a scalar node or a struct-type node) and computes
// the bit width its field-offset operands use.  Returns {Invalid, BitWidth};
// BitWidth stays ~0u if no constant field offset was inspected.
7641 TBAAVerifier::TBAABaseNodeSummary
7642 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7643 bool IsNewFormat) {
7644 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7645
7646 if (BaseNode->getNumOperands() == 2) {
7647 // Scalar nodes can only be accessed at offset 0.
7648 return isValidScalarTBAANode(BaseNode)
7649 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7650 : InvalidNode;
7651 }
7652
// New-format struct nodes: (name, size, [field-type, field-offset,
// field-size]*); old format: (name, [field-type, field-offset]*).  The
// operand-count congruence checks below encode exactly that layout.
7653 if (IsNewFormat) {
7654 if (BaseNode->getNumOperands() % 3 != 0) {
7655 CheckFailed("Access tag nodes must have the number of operands that is a "
7656 "multiple of 3!", BaseNode);
7657 return InvalidNode;
7658 }
7659 } else {
7660 if (BaseNode->getNumOperands() % 2 != 1) {
7661 CheckFailed("Struct tag nodes must have an odd number of operands!",
7662 BaseNode);
7663 return InvalidNode;
7664 }
7665 }
7666
7667 // Check the type size field.
7668 if (IsNewFormat) {
7669 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7670 BaseNode->getOperand(1));
7671 if (!TypeSizeNode) {
7672 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7673 return InvalidNode;
7674 }
7675 }
7676
7677 // Check the type name field. In the new format it can be anything.
7678 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7679 CheckFailed("Struct tag nodes have a string as their first operand",
7680 BaseNode);
7681 return InvalidNode;
7682 }
7683
7684 bool Failed = false;
7685
7686 std::optional<APInt> PrevOffset;
7687 unsigned BitWidth = ~0u;
7688
7689 // We've already checked that BaseNode is not a degenerate root node with one
7690 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7691 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7692 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7693 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7694 Idx += NumOpsPerField) {
7695 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7696 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7697 if (!isa<MDNode>(FieldTy)) {
7698 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7699 Failed = true;
7700 continue;
7701 }
7702
7703 auto *OffsetEntryCI =
// NOTE(review): this listing drops the line (original 7704) that
// initializes OffsetEntryCI -- presumably an
// mdconst::dyn_extract_or_null<ConstantInt> of FieldOffset, mirroring the
// MemberSizeNode extraction below.  Verify against upstream Verifier.cpp.
7705 if (!OffsetEntryCI) {
7706 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7707 Failed = true;
7708 continue;
7709 }
7710
// The first constant offset seen fixes the expected bit width for all
// subsequent offsets in this node.
7711 if (BitWidth == ~0u)
7712 BitWidth = OffsetEntryCI->getBitWidth();
7713
7714 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7715 CheckFailed(
7716 "Bitwidth between the offsets and struct type entries must match", &I,
7717 BaseNode);
7718 Failed = true;
7719 continue;
7720 }
7721
7722 // NB! As far as I can tell, we generate a non-strictly increasing offset
7723 // sequence only from structs that have zero size bit fields. When
7724 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7725 // pick the field lexically the latest in struct type metadata node. This
7726 // mirrors the actual behavior of the alias analysis implementation.
7727 bool IsAscending =
7728 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7729
7730 if (!IsAscending) {
7731 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7732 Failed = true;
7733 }
7734
7735 PrevOffset = OffsetEntryCI->getValue();
7736
7737 if (IsNewFormat) {
7738 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7739 BaseNode->getOperand(Idx + 2));
7740 if (!MemberSizeNode) {
7741 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7742 Failed = true;
7743 continue;
7744 }
7745 }
7746 }
7747
7748 return Failed ? InvalidNode
7749 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7750}
7751
7752static bool IsRootTBAANode(const MDNode *MD) {
7753 return MD->getNumOperands() < 2;
7754}
7755
// Returns true if MD is structurally a scalar TBAA type node: a string name,
// a parent link, and optionally a constant-zero offset operand.  The Visited
// set guards the recursion up the parent chain against metadata cycles.
// NOTE(review): this listing drops original line 7757 (the rest of the
// parameter list -- the cross-reference index gives it as
// "SmallPtrSetImpl<const MDNode *> &Visited") and original line 7765 (the
// extraction that initializes Offset, presumably from MD->getOperand(2)).
// Verify against upstream Verifier.cpp.
7756 static bool IsScalarTBAANodeImpl(const MDNode *MD,
7758 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7759 return false;
7760
7761 if (!isa<MDString>(MD->getOperand(0)))
7762 return false;
7763
// Three-operand form carries an extra offset that must be constant zero.
7764 if (MD->getNumOperands() == 3) {
7766 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7767 return false;
7768 }
7769
// Recurse into the parent; insert() returning false means we have seen this
// parent already, i.e. the chain is cyclic and therefore invalid.
7770 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7771 return Parent && Visited.insert(Parent).second &&
7772 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7773}
7774
7775bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7776 auto ResultIt = TBAAScalarNodes.find(MD);
7777 if (ResultIt != TBAAScalarNodes.end())
7778 return ResultIt->second;
7779
7780 SmallPtrSet<const MDNode *, 4> Visited;
7781 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7782 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7783 (void)InsertResult;
7784 assert(InsertResult.second && "Just checked!");
7785
7786 return Result;
7787}
7788
7789/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7790/// Offset in place to be the offset within the field node returned.
7791///
7792/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7793MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7794 const MDNode *BaseNode,
7795 APInt &Offset,
7796 bool IsNewFormat) {
7797 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7798
7799 // Scalar nodes have only one possible "field" -- their parent in the access
7800 // hierarchy. Offset must be zero at this point, but our caller is supposed
7801 // to check that.
7802 if (BaseNode->getNumOperands() == 2)
7803 return cast<MDNode>(BaseNode->getOperand(1));
7804
7805 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7806 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7807 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7808 Idx += NumOpsPerField) {
7809 auto *OffsetEntryCI =
7810 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7811 if (OffsetEntryCI->getValue().ugt(Offset)) {
7812 if (Idx == FirstFieldOpNo) {
7813 CheckFailed("Could not find TBAA parent in struct type node", &I,
7814 BaseNode, &Offset);
7815 return nullptr;
7816 }
7817
7818 unsigned PrevIdx = Idx - NumOpsPerField;
7819 auto *PrevOffsetEntryCI =
7820 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7821 Offset -= PrevOffsetEntryCI->getValue();
7822 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7823 }
7824 }
7825
7826 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7827 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7828 BaseNode->getOperand(LastIdx + 1));
7829 Offset -= LastOffsetEntryCI->getValue();
7830 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7831}
7832
// Body of isNewFormatTBAATypeNode.  NOTE(review): the signature line
// (original 7833) is missing from this listing; the cross-reference index
// gives it as "static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)".
// A new-format TBAA type node has at least 3 operands and an MDNode (the
// parent type) as its first operand.
7834 if (!Type || Type->getNumOperands() < 3)
7835 return false;
7836
7837 // In the new format type nodes shall have a reference to the parent type as
7838 // its first operand.
7839 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7840}
7841
// Body of the TBAA access-tag verifier (presumably
// TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) -- the
// signature line, original 7842, is missing from this listing; verify
// against upstream Verifier.cpp).  Validates the access tag attached to
// instruction I and walks the struct path from the base type to the access
// type, returning false on the first hard failure.
7843 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7844 &I, MD);
7845
// NOTE(review): original lines 7846-7848 (the condition of this CheckTBAA,
// presumably restricting TBAA tags to memory-accessing instruction kinds)
// are missing from this listing.
7849 "This instruction shall not have a TBAA access tag!", &I);
7850
// Struct-path tags start with an MDNode base type and have >= 3 operands;
// anything else is the long-removed old-style scalar TBAA.
7851 bool IsStructPathTBAA =
7852 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7853
7854 CheckTBAA(IsStructPathTBAA,
7855 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7856 &I);
7857
7858 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7859 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7860
7861 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7862
7863 if (IsNewFormat) {
7864 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7865 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7866 } else {
7867 CheckTBAA(MD->getNumOperands() < 5,
7868 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7869 }
7870
7871 // Check the access size field.
7872 if (IsNewFormat) {
7873 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7874 MD->getOperand(3));
7875 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7876 }
7877
7878 // Check the immutability flag.
7879 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7880 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7881 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7882 MD->getOperand(ImmutabilityFlagOpNo));
7883 CheckTBAA(IsImmutableCI,
7884 "Immutability tag on struct tag metadata must be a constant", &I,
7885 MD);
7886 CheckTBAA(
7887 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7888 "Immutability part of the struct tag metadata must be either 0 or 1",
7889 &I, MD);
7890 }
7891
7892 CheckTBAA(BaseNode && AccessType,
7893 "Malformed struct tag metadata: base and access-type "
7894 "should be non-null and point to Metadata nodes",
7895 &I, MD, BaseNode, AccessType);
7896
7897 if (!IsNewFormat) {
7898 CheckTBAA(isValidScalarTBAANode(AccessType),
7899 "Access type node must be a valid scalar type", &I, MD,
7900 AccessType);
7901 }
7902
// NOTE(review): original line 7903 (the extraction that initializes
// OffsetCI, presumably from MD->getOperand(2)) is missing from this listing.
7904 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7905
7906 APInt Offset = OffsetCI->getValue();
7907 bool SeenAccessTypeInPath = false;
7908
// StructPath detects cycles while descending field by field from the base
// type toward the access type.
7909 SmallPtrSet<MDNode *, 4> StructPath;
7910
7911 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7912 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7913 IsNewFormat)) {
7914 if (!StructPath.insert(BaseNode).second) {
7915 CheckFailed("Cycle detected in struct path", &I, MD);
7916 return false;
7917 }
7918
7919 bool Invalid;
7920 unsigned BaseNodeBitWidth;
7921 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7922 IsNewFormat);
7923
7924 // If the base node is invalid in itself, then we've already printed all the
7925 // errors we wanted to print.
7926 if (Invalid)
7927 return false;
7928
7929 SeenAccessTypeInPath |= BaseNode == AccessType;
7930
// Scalar nodes (and the access type itself) have no fields, so any
// remaining offset at this point is malformed.
7931 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7932 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7933 &I, MD, &Offset);
7934
7935 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7936 (BaseNodeBitWidth == 0 && Offset == 0) ||
7937 (IsNewFormat && BaseNodeBitWidth == ~0u),
7938 "Access bit-width not the same as description bit-width", &I, MD,
7939 BaseNodeBitWidth, Offset.getBitWidth());
7940
7941 if (IsNewFormat && SeenAccessTypeInPath)
7942 break;
7943 }
7944
7945 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7946 MD);
7947 return true;
7948}
7949
// Registration boilerplate for the legacy pass-manager verifier pass.
7950 char VerifierLegacyPass::ID = 0;
7951 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7952
// Body of the createVerifierPass factory.  NOTE(review): its signature line
// (original 7953, presumably
// "FunctionPass *llvm::createVerifierPass(bool FatalErrors) {") is missing
// from this listing.
7954 return new VerifierLegacyPass(FatalErrors);
7955}
7956
// Unique identity key for the new-PM VerifierAnalysis.
7957 AnalysisKey VerifierAnalysis::Key;
7964
7969
// Body of the module-level VerifierPass::run.  NOTE(review): the listing
// drops original lines 7958-7970, including the VerifierAnalysis::run
// definitions and this function's signature (presumably
// "PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager
// &AM) {") -- verify against upstream Verifier.cpp.
// Consumes the cached analysis result; aborts compilation when requested.
7971 auto Res = AM.getResult<VerifierAnalysis>(M);
7972 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7973 report_fatal_error("Broken module found, compilation aborted!");
7974
7975 return PreservedAnalyses::all();
7976}
7977
// Body of the function-level VerifierPass::run (its signature line, original
// 7978, is likewise missing from this listing).  Note: unlike the module
// overload, broken debug info alone is not fatal here.
7979 auto res = AM.getResult<VerifierAnalysis>(F);
7980 if (res.IRBroken && FatalErrors)
7981 report_fatal_error("Broken function found, compilation aborted!");
7982
7983 return PreservedAnalyses::all();
7984}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:678
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:719
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
bool isFiniteNonZero() const
Definition APFloat.h:1459
bool isNegative() const
Definition APFloat.h:1449
const fltSemantics & getSemantics() const
Definition APFloat.h:1457
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:399
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:784
bool isIntPredicate() const
Definition InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
bool empty() const
Definition DenseMap.h:107
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:652
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:316
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1445
bool isTemporary() const
Definition Metadata.h:1261
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1443
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1451
bool isDistinct() const
Definition Metadata.h:1260
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1257
LLVMContext & getContext() const
Definition Metadata.h:1241
Metadata * get() const
Definition Metadata.h:928
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:617
static LLVM_ABI bool isTagMD(const Metadata *MD)
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:111
Metadata * getMetadata() const
Definition Metadata.h:200
Root of the metadata hierarchy.
Definition Metadata.h:63
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:103
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
iterator_range< op_iterator > operands()
Definition Metadata.h:1849
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:480
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:269
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:151
static constexpr size_t npos
Definition StringRef.h:57
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
LLVM_ABI bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1058
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast from an unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:497
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:108
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:804
@ DW_MACINFO_start_file
Definition Dwarf.h:805
@ DW_MACINFO_define
Definition Dwarf.h:803
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:707
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:694
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:666
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:330
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:262
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:853
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2474
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
LLVM_ABI bool isExplicitlyUnknownProfileMetadata(const MDNode &MD)
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2138
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1652
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1899
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * BranchWeights
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:286
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:313
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144