LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139namespace llvm {
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "assign";
197 break;
199 *OS << "end";
200 break;
202 *OS << "any";
203 break;
204 };
205 }
206
207 void Write(const Metadata *MD) {
208 if (!MD)
209 return;
210 MD->print(*OS, MST, &M);
211 *OS << '\n';
212 }
213
214 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
215 Write(MD.get());
216 }
217
218 void Write(const NamedMDNode *NMD) {
219 if (!NMD)
220 return;
221 NMD->print(*OS, MST);
222 *OS << '\n';
223 }
224
225 void Write(Type *T) {
226 if (!T)
227 return;
228 *OS << ' ' << *T;
229 }
230
231 void Write(const Comdat *C) {
232 if (!C)
233 return;
234 *OS << *C;
235 }
236
237 void Write(const APInt *AI) {
238 if (!AI)
239 return;
240 *OS << *AI << '\n';
241 }
242
243 void Write(const unsigned i) { *OS << i << '\n'; }
244
245 // NOLINTNEXTLINE(readability-identifier-naming)
246 void Write(const Attribute *A) {
247 if (!A)
248 return;
249 *OS << A->getAsString() << '\n';
250 }
251
252 // NOLINTNEXTLINE(readability-identifier-naming)
253 void Write(const AttributeSet *AS) {
254 if (!AS)
255 return;
256 *OS << AS->getAsString() << '\n';
257 }
258
259 // NOLINTNEXTLINE(readability-identifier-naming)
260 void Write(const AttributeList *AL) {
261 if (!AL)
262 return;
263 AL->print(*OS);
264 }
265
266 void Write(Printable P) { *OS << P << '\n'; }
267
268 template <typename T> void Write(ArrayRef<T> Vs) {
269 for (const T &V : Vs)
270 Write(V);
271 }
272
/// Print each argument in order through the matching Write overload.
template <typename T1, typename... Ts>
void WriteTs(const T1 &V1, const Ts &...Vs) {
  Write(V1);
  // C++17 comma fold: expands to one Write call per remaining argument,
  // left to right — same call sequence as the recursive formulation.
  (Write(Vs), ...);
}
278
// Recursion terminator for WriteTs: no arguments left, nothing to print.
279 template <typename... Ts> void WriteTs() {}
280
281public:
282 /// A check failed, so printout out the condition and the message.
283 ///
284 /// This provides a nice place to put a breakpoint if you want to see why
285 /// something is not correct.
286 void CheckFailed(const Twine &Message) {
287 if (OS)
288 *OS << Message << '\n';
289 Broken = true;
290 }
291
292 /// A check failed (with values to print).
293 ///
294 /// This calls the Message-only version so that the above is easier to set a
295 /// breakpoint on.
296 template <typename T1, typename... Ts>
297 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
298 CheckFailed(Message);
299 if (OS)
300 WriteTs(V1, Vs...);
301 }
302
303 /// A debug info check failed.
304 void DebugInfoCheckFailed(const Twine &Message) {
305 if (OS)
306 *OS << Message << '\n';
// NOTE(review): original line 307 is elided in this listing; upstream
// Verifier.cpp also folds TreatBrokenDebugInfoAsError into Broken at this
// point — confirm against the authoritative source before relying on this
// copy.
308 BrokenDebugInfo = true;
309 }
310
311 /// A debug info check failed (with values to print).
312 template <typename T1, typename... Ts>
313 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
314 const Ts &... Vs) {
315 DebugInfoCheckFailed(Message);
316 if (OS)
317 WriteTs(V1, Vs...);
318 }
319};
320
321} // namespace llvm
322
323namespace {
324
325class Verifier : public InstVisitor<Verifier>, VerifierSupport {
326 friend class InstVisitor<Verifier>;
327 DominatorTree DT;
328
329 /// When verifying a basic block, keep track of all of the
330 /// instructions we have seen so far.
331 ///
332 /// This allows us to do efficient dominance checks for the case when an
333 /// instruction has an operand that is an instruction in the same block.
334 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
335
336 /// Keep track of the metadata nodes that have been checked already.
337 SmallPtrSet<const Metadata *, 32> MDNodes;
338
339 /// Keep track which DISubprogram is attached to which function.
340 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
341
342 /// Track all DICompileUnits visited.
343 SmallPtrSet<const Metadata *, 2> CUVisited;
344
345 /// The result type for a landingpad.
346 Type *LandingPadResultTy;
347
348 /// Whether we've seen a call to @llvm.localescape in this function
349 /// already.
350 bool SawFrameEscape;
351
352 /// Whether the current function has a DISubprogram attached to it.
353 bool HasDebugInfo = false;
354
355 /// Stores the count of how many objects were passed to llvm.localescape for a
356 /// given function and the largest index passed to llvm.localrecover.
357 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
358
359 // Maps catchswitches and cleanuppads that unwind to siblings to the
360 // terminators that indicate the unwind, used to detect cycles therein.
361 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
362
363 /// Cache which blocks are in which funclet, if an EH funclet personality is
364 /// in use. Otherwise empty.
365 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
366
367 /// Cache of constants visited in search of ConstantExprs.
368 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
369
370 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
371 SmallVector<const Function *, 4> DeoptimizeDeclarations;
372
373 /// Cache of attribute lists verified.
374 SmallPtrSet<const void *, 32> AttributeListsVisited;
375
376 // Verify that this GlobalValue is only used in this module.
377 // This map is used to avoid visiting uses twice. We can arrive at a user
378 // twice, if they have multiple operands. In particular for very large
379 // constant expressions, we can arrive at a particular user many times.
380 SmallPtrSet<const Value *, 32> GlobalValueVisited;
381
382 // Keeps track of duplicate function argument debug info.
384
385 TBAAVerifier TBAAVerifyHelper;
386 ConvergenceVerifier ConvergenceVerifyHelper;
387
388 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
389
390 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
391
392public:
// Build a verifier bound to module M. Diagnostics are written to OS (may be
// null to suppress output); ShouldTreatBrokenDebugInfoAsError selects whether
// broken debug info fails verification outright or is merely recorded.
393 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
394 const Module &M)
395 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
396 SawFrameEscape(false), TBAAVerifyHelper(this) {
397 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
398 }
399
400 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
401
// Verify a single function of the bound module. Returns true when no
// errors were recorded (broken debug info is tracked separately).
402 bool verify(const Function &F) {
403 llvm::TimeTraceScope timeScope("Verifier");
404 assert(F.getParent() == &M &&
405 "An instance of this class only works with a specific module!");
406
407 // First ensure the function is well-enough formed to compute dominance
408 // information, and directly compute a dominance tree. We don't rely on the
409 // pass manager to provide this as it isolates us from a potentially
410 // out-of-date dominator tree and makes it significantly more complex to run
411 // this code outside of a pass manager.
412 // FIXME: It's really gross that we have to cast away constness here.
413 if (!F.empty())
414 DT.recalculate(const_cast<Function &>(F));
415
// A block without a terminator makes the rest of the checks meaningless:
// report it (when a stream is available) and give up on this function.
416 for (const BasicBlock &BB : F) {
417 if (!BB.empty() && BB.back().isTerminator())
418 continue;
419
420 if (OS) {
421 *OS << "Basic Block in function '" << F.getName()
422 << "' does not have terminator!\n";
423 BB.printAsOperand(*OS, true, MST);
424 *OS << "\n";
425 }
426 return false;
427 }
428
// Route convergence-verifier failures through CheckFailed so they set
// Broken like every other check.
429 auto FailureCB = [this](const Twine &Message) {
430 this->CheckFailed(Message);
431 };
432 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
433
434 Broken = false;
435 // FIXME: We strip const here because the inst visitor strips const.
436 visit(const_cast<Function &>(F));
437 verifySiblingFuncletUnwinds();
438
439 if (ConvergenceVerifyHelper.sawTokens())
440 ConvergenceVerifyHelper.verify(DT);
441
// Reset per-function state so this instance can verify further functions.
442 InstsInThisBlock.clear();
443 DebugFnArgs.clear();
444 LandingPadResultTy = nullptr;
445 SawFrameEscape = false;
446 SiblingFuncletInfo.clear();
447 verifyNoAliasScopeDecl();
448 NoAliasScopeDecls.clear();
449
450 return !Broken;
451 }
452
453 /// Verify the module that this instance of \c Verifier was initialized with.
// Verify module-level entities: globals, aliases, ifuncs, named metadata,
// comdats, module flags/idents/command lines, compile units, and deoptimize
// calling conventions. Per-function checks happen in verify(const Function&).
// Returns true when no errors were recorded.
454 bool verify() {
455 Broken = false;
456
457 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
458 for (const Function &F : M)
459 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
460 DeoptimizeDeclarations.push_back(&F);
461
462 // Now that we've visited every function, verify that we never asked to
463 // recover a frame index that wasn't escaped.
464 verifyFrameRecoverIndices();
465 for (const GlobalVariable &GV : M.globals())
466 visitGlobalVariable(GV);
467
468 for (const GlobalAlias &GA : M.aliases())
469 visitGlobalAlias(GA);
470
471 for (const GlobalIFunc &GI : M.ifuncs())
472 visitGlobalIFunc(GI);
473
474 for (const NamedMDNode &NMD : M.named_metadata())
475 visitNamedMDNode(NMD);
476
477 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
478 visitComdat(SMEC.getValue());
479
480 visitModuleFlags();
481 visitModuleIdents();
482 visitModuleCommandLines();
483
484 verifyCompileUnits();
485
486 verifyDeoptimizeCallingConvs();
487 DISubprogramAttachments.clear();
488 return !Broken;
489 }
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleFlags();
520 void visitModuleFlag(const MDNode *Op,
521 DenseMap<const MDString *, const MDNode *> &SeenIDs,
522 SmallVectorImpl<const MDNode *> &Requirements);
523 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
524 void visitFunction(const Function &F);
525 void visitBasicBlock(BasicBlock &BB);
526 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
527 RangeLikeMetadataKind Kind);
528 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
529 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
531 void visitNofreeMetadata(Instruction &I, MDNode *MD);
532 void visitProfMetadata(Instruction &I, MDNode *MD);
533 void visitCallStackMetadata(MDNode *MD);
534 void visitMemProfMetadata(Instruction &I, MDNode *MD);
535 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
536 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
537 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
538 void visitMMRAMetadata(Instruction &I, MDNode *MD);
539 void visitAnnotationMetadata(MDNode *Annotation);
540 void visitAliasScopeMetadata(const MDNode *MD);
541 void visitAliasScopeListMetadata(const MDNode *MD);
542 void visitAccessGroupMetadata(const MDNode *MD);
543
544 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
545#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
546#include "llvm/IR/Metadata.def"
547 void visitDIScope(const DIScope &N);
548 void visitDIVariable(const DIVariable &N);
549 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
550 void visitDITemplateParameter(const DITemplateParameter &N);
551
552 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
553
554 void visit(DbgLabelRecord &DLR);
555 void visit(DbgVariableRecord &DVR);
556 // InstVisitor overrides...
557 using InstVisitor<Verifier>::visit;
558 void visitDbgRecords(Instruction &I);
559 void visit(Instruction &I);
560
561 void visitTruncInst(TruncInst &I);
562 void visitZExtInst(ZExtInst &I);
563 void visitSExtInst(SExtInst &I);
564 void visitFPTruncInst(FPTruncInst &I);
565 void visitFPExtInst(FPExtInst &I);
566 void visitFPToUIInst(FPToUIInst &I);
567 void visitFPToSIInst(FPToSIInst &I);
568 void visitUIToFPInst(UIToFPInst &I);
569 void visitSIToFPInst(SIToFPInst &I);
570 void visitIntToPtrInst(IntToPtrInst &I);
571 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
572 void visitPtrToAddrInst(PtrToAddrInst &I);
573 void visitPtrToIntInst(PtrToIntInst &I);
574 void visitBitCastInst(BitCastInst &I);
575 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
576 void visitPHINode(PHINode &PN);
577 void visitCallBase(CallBase &Call);
578 void visitUnaryOperator(UnaryOperator &U);
579 void visitBinaryOperator(BinaryOperator &B);
580 void visitICmpInst(ICmpInst &IC);
581 void visitFCmpInst(FCmpInst &FC);
582 void visitExtractElementInst(ExtractElementInst &EI);
583 void visitInsertElementInst(InsertElementInst &EI);
584 void visitShuffleVectorInst(ShuffleVectorInst &EI);
585 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
586 void visitCallInst(CallInst &CI);
587 void visitInvokeInst(InvokeInst &II);
588 void visitGetElementPtrInst(GetElementPtrInst &GEP);
589 void visitLoadInst(LoadInst &LI);
590 void visitStoreInst(StoreInst &SI);
591 void verifyDominatesUse(Instruction &I, unsigned i);
592 void visitInstruction(Instruction &I);
593 void visitTerminator(Instruction &I);
594 void visitBranchInst(BranchInst &BI);
595 void visitReturnInst(ReturnInst &RI);
596 void visitSwitchInst(SwitchInst &SI);
597 void visitIndirectBrInst(IndirectBrInst &BI);
598 void visitCallBrInst(CallBrInst &CBI);
599 void visitSelectInst(SelectInst &SI);
600 void visitUserOp1(Instruction &I);
601 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
602 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
603 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
604 void visitVPIntrinsic(VPIntrinsic &VPI);
605 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
606 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
607 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
608 void visitFenceInst(FenceInst &FI);
609 void visitAllocaInst(AllocaInst &AI);
610 void visitExtractValueInst(ExtractValueInst &EVI);
611 void visitInsertValueInst(InsertValueInst &IVI);
612 void visitEHPadPredecessors(Instruction &I);
613 void visitLandingPadInst(LandingPadInst &LPI);
614 void visitResumeInst(ResumeInst &RI);
615 void visitCatchPadInst(CatchPadInst &CPI);
616 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
617 void visitCleanupPadInst(CleanupPadInst &CPI);
618 void visitFuncletPadInst(FuncletPadInst &FPI);
619 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
620 void visitCleanupReturnInst(CleanupReturnInst &CRI);
621
622 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
623 void verifySwiftErrorValue(const Value *SwiftErrorVal);
624 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
625 void verifyMustTailCall(CallInst &CI);
626 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
627 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
628 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
629 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
630 const Value *V);
631 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
632 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
633 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
634 void verifyUnknownProfileMetadata(MDNode *MD);
635 void visitConstantExprsRecursively(const Constant *EntryC);
636 void visitConstantExpr(const ConstantExpr *CE);
637 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
638 void verifyInlineAsmCall(const CallBase &Call);
639 void verifyStatepoint(const CallBase &Call);
640 void verifyFrameRecoverIndices();
641 void verifySiblingFuncletUnwinds();
642
643 void verifyFragmentExpression(const DbgVariableRecord &I);
644 template <typename ValueOrMetadata>
645 void verifyFragmentExpression(const DIVariable &V,
647 ValueOrMetadata *Desc);
648 void verifyFnArgs(const DbgVariableRecord &DVR);
649 void verifyNotEntryValue(const DbgVariableRecord &I);
650
651 /// Module-level debug info verification...
652 void verifyCompileUnits();
653
654 /// Module-level verification that all @llvm.experimental.deoptimize
655 /// declarations share the same calling convention.
656 void verifyDeoptimizeCallingConvs();
657
658 void verifyAttachedCallBundle(const CallBase &Call,
659 const OperandBundleUse &BU);
660
661 /// Verify the llvm.experimental.noalias.scope.decl declarations
662 void verifyNoAliasScopeDecl();
663};
664
665} // end anonymous namespace
666
667/// We know that cond should be true, if not print an error message.
668#define Check(C, ...) \
669 do { \
670 if (!(C)) { \
671 CheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
675
676/// We know that a debug info condition should be true, if not print
677/// an error message.
678#define CheckDI(C, ...) \
679 do { \
680 if (!(C)) { \
681 DebugInfoCheckFailed(__VA_ARGS__); \
682 return; \
683 } \
684 } while (false)
685
// Validate the DbgRecords attached to instruction I: the marker must point
// back at I, PHIs may carry no records, and each record's marker must be I's.
// Variable records additionally get fragment/entry-value checks.
686void Verifier::visitDbgRecords(Instruction &I) {
687 if (!I.DebugMarker)
688 return;
689 CheckDI(I.DebugMarker->MarkedInstr == &I,
690 "Instruction has invalid DebugMarker", &I);
691 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
692 "PHI Node must not have any attached DbgRecords", &I);
693 for (DbgRecord &DR : I.getDbgRecordRange()) {
694 CheckDI(DR.getMarker() == I.DebugMarker,
695 "DbgRecord had invalid DebugMarker", &I, &DR);
// NOTE(review): original line 697 is elided in this listing — the
// initializer of Loc (presumably extracting a DILocation from the record's
// debug location) is missing. Confirm against the authoritative source.
696 if (auto *Loc =
698 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
699 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
700 visit(*DVR);
701 // These have to appear after `visit` for consistency with existing
702 // intrinsic behaviour.
703 verifyFragmentExpression(*DVR);
704 verifyNotEntryValue(*DVR);
705 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
706 visit(*DLR);
707 }
708 }
709}
710
// Entry point for per-instruction verification: checks attached debug
// records and null operands before the instruction-specific visitors run.
711void Verifier::visit(Instruction &I) {
712 visitDbgRecords(I);
713 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
714 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
// NOTE(review): original line 715 is elided in this listing; upstream it
// dispatches to the InstVisitor base to reach the per-opcode visit methods —
// confirm against the authoritative source.
716}
717
718// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
// Walk User's transitive (indirect) users, invoking Callback on each one.
// Callback returning true continues the traversal into that value's users;
// returning false stops descending past it. Visited de-duplicates values
// reachable through multiple operand paths.
// NOTE(review): original line 720 is elided in this listing — the Visited
// set parameter declaration is missing from the signature as shown.
719static void forEachUser(const Value *User,
721 llvm::function_ref<bool(const Value *)> Callback) {
722 if (!Visited.insert(User).second)
723 return;
724
// NOTE(review): original line 725 is elided — the WorkList declaration and
// its seeding from User's users is missing here. Confirm against upstream.
726 while (!WorkList.empty()) {
727 const Value *Cur = WorkList.pop_back_val();
728 if (!Visited.insert(Cur).second)
729 continue;
730 if (Callback(Cur))
731 append_range(WorkList, Cur->materialized_users());
732 }
733}
734
735void Verifier::visitGlobalValue(const GlobalValue &GV) {
737 "Global is external, but doesn't have external or weak linkage!", &GV);
738
739 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
740 if (const MDNode *Associated =
741 GO->getMetadata(LLVMContext::MD_associated)) {
742 Check(Associated->getNumOperands() == 1,
743 "associated metadata must have one operand", &GV, Associated);
744 const Metadata *Op = Associated->getOperand(0).get();
745 Check(Op, "associated metadata must have a global value", GO, Associated);
746
747 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
748 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
749 if (VM) {
750 Check(isa<PointerType>(VM->getValue()->getType()),
751 "associated value must be pointer typed", GV, Associated);
752
753 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
754 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
755 "associated metadata must point to a GlobalObject", GO, Stripped);
756 Check(Stripped != GO,
757 "global values should not associate to themselves", GO,
758 Associated);
759 }
760 }
761
762 // FIXME: Why is getMetadata on GlobalValue protected?
763 if (const MDNode *AbsoluteSymbol =
764 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
765 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
766 DL.getIntPtrType(GO->getType()),
767 RangeLikeMetadataKind::AbsoluteSymbol);
768 }
769 }
770
772 "Only global variables can have appending linkage!", &GV);
773
774 if (GV.hasAppendingLinkage()) {
775 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
776 Check(GVar && GVar->getValueType()->isArrayTy(),
777 "Only global arrays can have appending linkage!", GVar);
778 }
779
780 if (GV.isDeclarationForLinker())
781 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
782
783 if (GV.hasDLLExportStorageClass()) {
785 "dllexport GlobalValue must have default or protected visibility",
786 &GV);
787 }
788 if (GV.hasDLLImportStorageClass()) {
790 "dllimport GlobalValue must have default visibility", &GV);
791 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
792 &GV);
793
794 Check((GV.isDeclaration() &&
797 "Global is marked as dllimport, but not external", &GV);
798 }
799
800 if (GV.isImplicitDSOLocal())
801 Check(GV.isDSOLocal(),
802 "GlobalValue with local linkage or non-default "
803 "visibility must be dso_local!",
804 &GV);
805
806 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
807 if (const Instruction *I = dyn_cast<Instruction>(V)) {
808 if (!I->getParent() || !I->getParent()->getParent())
809 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
810 I);
811 else if (I->getParent()->getParent()->getParent() != &M)
812 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
813 I->getParent()->getParent(),
814 I->getParent()->getParent()->getParent());
815 return false;
816 } else if (const Function *F = dyn_cast<Function>(V)) {
817 if (F->getParent() != &M)
818 CheckFailed("Global is used by function in a different module", &GV, &M,
819 F, F->getParent());
820 return false;
821 }
822 return true;
823 });
824}
825
826void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
827 Type *GVType = GV.getValueType();
828
829 if (MaybeAlign A = GV.getAlign()) {
830 Check(A->value() <= Value::MaximumAlignment,
831 "huge alignment values are unsupported", &GV);
832 }
833
834 if (GV.hasInitializer()) {
835 Check(GV.getInitializer()->getType() == GVType,
836 "Global variable initializer type does not match global "
837 "variable type!",
838 &GV);
840 "Global variable initializer must be sized", &GV);
841 visitConstantExprsRecursively(GV.getInitializer());
842 // If the global has common linkage, it must have a zero initializer and
843 // cannot be constant.
844 if (GV.hasCommonLinkage()) {
846 "'common' global must have a zero initializer!", &GV);
847 Check(!GV.isConstant(), "'common' global may not be marked constant!",
848 &GV);
849 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
850 }
851 }
852
853 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
854 GV.getName() == "llvm.global_dtors")) {
856 "invalid linkage for intrinsic global variable", &GV);
858 "invalid uses of intrinsic global variable", &GV);
859
860 // Don't worry about emitting an error for it not being an array,
861 // visitGlobalValue will complain on appending non-array.
862 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
863 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
864 PointerType *FuncPtrTy =
865 PointerType::get(Context, DL.getProgramAddressSpace());
866 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
867 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
868 STy->getTypeAtIndex(1) == FuncPtrTy,
869 "wrong type for intrinsic global variable", &GV);
870 Check(STy->getNumElements() == 3,
871 "the third field of the element type is mandatory, "
872 "specify ptr null to migrate from the obsoleted 2-field form");
873 Type *ETy = STy->getTypeAtIndex(2);
874 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
875 &GV);
876 }
877 }
878
879 if (GV.hasName() && (GV.getName() == "llvm.used" ||
880 GV.getName() == "llvm.compiler.used")) {
882 "invalid linkage for intrinsic global variable", &GV);
884 "invalid uses of intrinsic global variable", &GV);
885
886 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
887 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
888 Check(PTy, "wrong type for intrinsic global variable", &GV);
889 if (GV.hasInitializer()) {
890 const Constant *Init = GV.getInitializer();
891 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
892 Check(InitArray, "wrong initalizer for intrinsic global variable",
893 Init);
894 for (Value *Op : InitArray->operands()) {
895 Value *V = Op->stripPointerCasts();
898 Twine("invalid ") + GV.getName() + " member", V);
899 Check(V->hasName(),
900 Twine("members of ") + GV.getName() + " must be named", V);
901 }
902 }
903 }
904 }
905
906 // Visit any debug info attachments.
908 GV.getMetadata(LLVMContext::MD_dbg, MDs);
909 for (auto *MD : MDs) {
910 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
911 visitDIGlobalVariableExpression(*GVE);
912 else
913 CheckDI(false, "!dbg attachment of global variable must be a "
914 "DIGlobalVariableExpression");
915 }
916
917 // Scalable vectors cannot be global variables, since we don't know
918 // the runtime size.
919 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
920
921 // Check if it is or contains a target extension type that disallows being
922 // used as a global.
924 "Global @" + GV.getName() + " has illegal target extension type",
925 GVType);
926
927 if (!GV.hasInitializer()) {
928 visitGlobalValue(GV);
929 return;
930 }
931
932 // Walk any aggregate initializers looking for bitcasts between address spaces
933 visitConstantExprsRecursively(GV.getInitializer());
934
935 visitGlobalValue(GV);
936}
937
938void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
939 SmallPtrSet<const GlobalAlias*, 4> Visited;
940 Visited.insert(&GA);
941 visitAliaseeSubExpr(Visited, GA, C);
942}
943
// Recursively verify the expression an alias points at.  Visited carries the
// set of aliases already seen on this chain so cycles are diagnosed rather
// than looped on.  NOTE(review): some enclosing lines of this function are
// not visible in this excerpt; the checks below are kept byte-identical.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
            cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // An alias to a mere declaration has nothing to resolve to at link time.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // insert() returning false means GA2 was already on this chain: a cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk operands: follow nested aliases through their aliasees, and any
  // other constant subexpressions directly.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
982
// Verify a single GlobalAlias: linkage, a non-null aliasee whose type matches
// the alias, and a well-formed aliasee expression.  NOTE(review): the opening
// Check of the linkage condition is not visible in this excerpt.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively check the aliasee expression (cycles, interposable targets).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally run the checks common to all global values.
  visitGlobalValue(GA);
}
1000
// Verify a GlobalIFunc: acceptable linkage and a resolver that is a defined
// Function returning a pointer in the ifunc's address space.  NOTE(review):
// the opening linkage Check and the pointer-type Check's condition line are
// not visible in this excerpt.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver value must live in the same address space as the ifunc.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1023
1024void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1025 // There used to be various other llvm.dbg.* nodes, but we don't support
1026 // upgrading them and we want to reserve the namespace for future uses.
1027 if (NMD.getName().starts_with("llvm.dbg."))
1028 CheckDI(NMD.getName() == "llvm.dbg.cu",
1029 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1030 for (const MDNode *MD : NMD.operands()) {
1031 if (NMD.getName() == "llvm.dbg.cu")
1032 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1033
1034 if (!MD)
1035 continue;
1036
1037 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1038 }
1039}
1040
// Verify one MDNode and, transitively, everything it references.  AllowLocs
// records whether DILocations are legal in this position.  NOTE(review): two
// lines of the llvm.loop.estimated_trip_count check (the operand-0 name test
// and the extraction of Count) are not visible in this excerpt.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for this node's concrete subclass.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands, propagating whether locations are allowed.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1095
1096void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1097 Check(MD.getValue(), "Expected valid value", &MD);
1098 Check(!MD.getValue()->getType()->isMetadataTy(),
1099 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1100
1101 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1102 if (!L)
1103 return;
1104
1105 Check(F, "function-local metadata used outside a function", L);
1106
1107 // If this was an instruction, bb, or argument, verify that it is in the
1108 // function that we expect.
1109 Function *ActualF = nullptr;
1110 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1111 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1112 ActualF = I->getParent()->getParent();
1113 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1114 ActualF = BB->getParent();
1115 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1116 ActualF = A->getParent();
1117 assert(ActualF && "Unimplemented function local metadata case!");
1118
1119 Check(ActualF == F, "function-local metadata used in wrong function", L);
1120}
1121
1122void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1123 for (const ValueAsMetadata *VAM : AL.getArgs())
1124 visitValueAsMetadata(*VAM, F);
1125}
1126
1127void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1128 Metadata *MD = MDV.getMetadata();
1129 if (auto *N = dyn_cast<MDNode>(MD)) {
1130 visitMDNode(*N, AreDebugLocsAllowed::No);
1131 return;
1132 }
1133
1134 // Only visit each node once. Metadata can be mutually recursive, so this
1135 // avoids infinite recursion here, as well as being an optimization.
1136 if (!MDNodes.insert(MD).second)
1137 return;
1138
1139 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1140 visitValueAsMetadata(*V, F);
1141
1142 if (auto *AL = dyn_cast<DIArgList>(MD))
1143 visitDIArgList(*AL, F);
1144}
1145
1146static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1147static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1148static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1149
1150void Verifier::visitDILocation(const DILocation &N) {
1151 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1152 "location requires a valid scope", &N, N.getRawScope());
1153 if (auto *IA = N.getRawInlinedAt())
1154 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1155 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1156 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1157}
1158
1159void Verifier::visitGenericDINode(const GenericDINode &N) {
1160 CheckDI(N.getTag(), "invalid tag", &N);
1161}
1162
1163void Verifier::visitDIScope(const DIScope &N) {
1164 if (auto *F = N.getRawFile())
1165 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1166}
1167
// Verify a DW_TAG_subrange_type node: each bound/stride/bias may be a signed
// constant, a DIVariable, or a DIExpression (the latter two cover bounds only
// known at runtime).  NOTE(review): the condition line of the final
// SizeInBits check is not visible in this excerpt.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1195
// Verify a DISubrange: count and upperBound are mutually exclusive ways of
// bounding the range, and every bound may be a constant, DIVariable, or
// DIExpression.  NOTE(review): the first line of the count-value check (the
// part guarding cast<ConstantInt *>) is not visible in this excerpt.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
          // A constant count of -1 encodes "unknown"; anything smaller is bad.
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1223
1224void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1225 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1226 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1227 "GenericSubrange can have any one of count or upperBound", &N);
1228 auto *CBound = N.getRawCountNode();
1229 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1230 "Count must be signed constant or DIVariable or DIExpression", &N);
1231 auto *LBound = N.getRawLowerBound();
1232 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1233 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1234 "LowerBound must be signed constant or DIVariable or DIExpression",
1235 &N);
1236 auto *UBound = N.getRawUpperBound();
1237 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1238 "UpperBound must be signed constant or DIVariable or DIExpression",
1239 &N);
1240 auto *Stride = N.getRawStride();
1241 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1242 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1243 "Stride must be signed constant or DIVariable or DIExpression", &N);
1244}
1245
1246void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1247 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1248}
1249
// Verify a basic type node: one of the three permitted tags, and a constant
// size.  NOTE(review): the condition line of the SizeInBits check is not
// visible in this excerpt.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1260
// Verify a fixed-point type: it is a base type with a fixed-point encoding,
// and its factor/numerator/denominator fields must be consistent with its
// kind.  NOTE(review): the kind check and the two conditions guarding the
// factor/rational checks are not visible in this excerpt.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Run the common basic-type checks first.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1279
1280void Verifier::visitDIStringType(const DIStringType &N) {
1281 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1282 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1283 &N);
1284}
1285
// Verify a derived type: legal tag, well-formed scope/base-type references,
// and the tag-specific constraints (ptr-to-member extra data, Pascal set base
// types, DWARF address spaces).  NOTE(review): the dyn_casts producing Enum/
// Subrange/Basic for the set-type check and the final SizeInBits condition
// are not visible in this excerpt.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // Pointer-to-member carries the class type in the extra-data slot.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  // Pascal-style sets may only be built over enumerations, subranges, or a
  // restricted list of integral base-type encodings.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DW_AT_address_class is only meaningful on pointers and references.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1347
1348/// Detect mutually exclusive flags.
1349static bool hasConflictingReferenceFlags(unsigned Flags) {
1350 return ((Flags & DINode::FlagLValueReference) &&
1351 (Flags & DINode::FlagRValueReference)) ||
1352 ((Flags & DINode::FlagTypePassByValue) &&
1353 (Flags & DINode::FlagTypePassByReference));
1354}
1355
1356void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1357 auto *Params = dyn_cast<MDTuple>(&RawParams);
1358 CheckDI(Params, "invalid template params", &N, &RawParams);
1359 for (Metadata *Op : Params->operands()) {
1360 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1361 &N, Params, Op);
1362 }
1363}
1364
// Verify a composite type (struct/class/union/enum/array/variant/namelist):
// tag, references, element list, and the array-only attributes.
// NOTE(review): the condition line of the reference-flags check and the
// condition of the final SizeInBits check are not visible in this excerpt.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Reject a long-removed Blocks-runtime flag that once occupied bit 4.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): this message names DISubprogram although the node being
  // checked is a DICompositeType — looks copy-pasted; confirm before changing
  // (regression tests may match the exact string).
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank are Fortran array attributes and
  // are meaningless on any other composite kind.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1439
// Verify a subroutine type: the type array, if present, is a tuple of type
// references (a null entry means void/unspecified).  NOTE(review): the
// condition line of the reference-flags check is not visible in this excerpt.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1451
1452void Verifier::visitDIFile(const DIFile &N) {
1453 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1454 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1455 if (Checksum) {
1456 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1457 "invalid checksum kind", &N);
1458 size_t Size;
1459 switch (Checksum->Kind) {
1460 case DIFile::CSK_MD5:
1461 Size = 32;
1462 break;
1463 case DIFile::CSK_SHA1:
1464 Size = 40;
1465 break;
1466 case DIFile::CSK_SHA256:
1467 Size = 64;
1468 break;
1469 }
1470 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1471 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1472 "invalid checksum", &N);
1473 }
1474}
1475
// Verify a compile unit: distinct node, valid file, and well-typed enum /
// retained-type / global-variable / imported-entity / macro lists.  Visited
// CUs are recorded in CUVisited for a later cross-check.  NOTE(review): the
// dyn_cast producing Enum and the global-variable-entry CheckDI condition are
// not visible in this excerpt.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained subprograms must be declarations; definitions are reached
      // through their functions instead.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Remember this CU so module-level checks can confirm it is listed in
  // llvm.dbg.cu.
  CUVisited.insert(&N);
}
1529
// Verify a subprogram: references, retained nodes, thrown types, and the
// definition/declaration-specific invariants (unit presence, distinctness,
// ODR nesting).  NOTE(review): the retained-nodes entry check's condition and
// the reference-flags check's condition are not visible in this excerpt.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration reference must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  // Call-site debug info only makes sense on a definition.
  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1594
1595void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1596 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1597 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1598 "invalid local scope", &N, N.getRawScope());
1599 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1600 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1601}
1602
1603void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1604 visitDILexicalBlockBase(N);
1605
1606 CheckDI(N.getLine() || !N.getColumn(),
1607 "cannot have column info without line info", &N);
1608}
1609
1610void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1611 visitDILexicalBlockBase(N);
1612}
1613
1614void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1615 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1616 if (auto *S = N.getRawScope())
1617 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1618 if (auto *S = N.getRawDecl())
1619 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1620}
1621
1622void Verifier::visitDINamespace(const DINamespace &N) {
1623 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1624 if (auto *S = N.getRawScope())
1625 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1626}
1627
1628void Verifier::visitDIMacro(const DIMacro &N) {
1629 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1630 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1631 "invalid macinfo type", &N);
1632 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1633 if (!N.getValue().empty()) {
1634 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1635 }
1636}
1637
1638void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1639 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1640 "invalid macinfo type", &N);
1641 if (auto *F = N.getRawFile())
1642 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1643
1644 if (auto *Array = N.getRawElements()) {
1645 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1646 for (Metadata *Op : N.getElements()->operands()) {
1647 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1648 }
1649 }
1650}
1651
1652void Verifier::visitDIModule(const DIModule &N) {
1653 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1654 CheckDI(!N.getName().empty(), "anonymous module", &N);
1655}
1656
1657void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1658 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1659}
1660
1661void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1662 visitDITemplateParameter(N);
1663
1664 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1665 &N);
1666}
1667
1668void Verifier::visitDITemplateValueParameter(
1669 const DITemplateValueParameter &N) {
1670 visitDITemplateParameter(N);
1671
1672 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1673 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1674 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1675 "invalid tag", &N);
1676}
1677
1678void Verifier::visitDIVariable(const DIVariable &N) {
1679 if (auto *S = N.getRawScope())
1680 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1681 if (auto *F = N.getRawFile())
1682 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1683}
1684
// Verify a global-variable debug node.  NOTE(review): the condition line of
// the static-data-member check is not visible in this excerpt.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1699
1700void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1701 // Checks common to all variables.
1702 visitDIVariable(N);
1703
1704 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1705 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1706 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1707 "local variable requires a valid scope", &N, N.getRawScope());
1708 if (auto Ty = N.getType())
1709 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1710}
1711
1712void Verifier::visitDIAssignID(const DIAssignID &N) {
1713 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1714 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1715}
1716
1717void Verifier::visitDILabel(const DILabel &N) {
1718 if (auto *S = N.getRawScope())
1719 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1720 if (auto *F = N.getRawFile())
1721 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1722
1723 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1724 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1725 "label requires a valid scope", &N, N.getRawScope());
1726}
1727
1728void Verifier::visitDIExpression(const DIExpression &N) {
1729 CheckDI(N.isValid(), "invalid expression", &N);
1730}
1731
1732void Verifier::visitDIGlobalVariableExpression(
1733 const DIGlobalVariableExpression &GVE) {
1734 CheckDI(GVE.getVariable(), "missing variable");
1735 if (auto *Var = GVE.getVariable())
1736 visitDIGlobalVariable(*Var);
1737 if (auto *Expr = GVE.getExpression()) {
1738 visitDIExpression(*Expr);
1739 if (auto Fragment = Expr->getFragmentInfo())
1740 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1741 }
1742}
1743
1744void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1745 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1746 if (auto *T = N.getRawType())
1747 CheckDI(isType(T), "invalid type ref", &N, T);
1748 if (auto *F = N.getRawFile())
1749 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1750}
1751
1752void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1753 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1754 N.getTag() == dwarf::DW_TAG_imported_declaration,
1755 "invalid tag", &N);
1756 if (auto *S = N.getRawScope())
1757 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1758 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1759 N.getRawEntity());
1760}
1761
1762void Verifier::visitComdat(const Comdat &C) {
1763 // In COFF the Module is invalid if the GlobalValue has private linkage.
1764 // Entities with private linkage don't have entries in the symbol table.
1765 if (TT.isOSBinFormatCOFF())
1766 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1767 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1768 GV);
1769}
1770
1771void Verifier::visitModuleIdents() {
1772 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1773 if (!Idents)
1774 return;
1775
1776 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1777 // Scan each llvm.ident entry and make sure that this requirement is met.
1778 for (const MDNode *N : Idents->operands()) {
1779 Check(N->getNumOperands() == 1,
1780 "incorrect number of operands in llvm.ident metadata", N);
1781 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1782 ("invalid value for llvm.ident metadata entry operand"
1783 "(the operand should be a string)"),
1784 N->getOperand(0));
1785 }
1786}
1787
1788void Verifier::visitModuleCommandLines() {
1789 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1790 if (!CommandLines)
1791 return;
1792
1793 // llvm.commandline takes a list of metadata entry. Each entry has only one
1794 // string. Scan each llvm.commandline entry and make sure that this
1795 // requirement is met.
1796 for (const MDNode *N : CommandLines->operands()) {
1797 Check(N->getNumOperands() == 1,
1798 "incorrect number of operands in llvm.commandline metadata", N);
1799 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1800 ("invalid value for llvm.commandline metadata entry operand"
1801 "(the operand should be a string)"),
1802 N->getOperand(0));
1803 }
1804}
1805
/// Verify the module-flags metadata (!llvm.module.flags). Each flag is
/// checked individually via visitModuleFlag(); in addition, the AArch64
/// pauthabi platform/version flags must be present as a pair, and all
/// 'require' entries collected during the scan are validated at the end.
void Verifier::visitModuleFlags() {
  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
  if (!Flags) return;

  // Scan each flag, and track the flags and requirements.
  DenseMap<const MDString*, const MDNode*> SeenIDs;
  SmallVector<const MDNode*, 16> Requirements;
  // Sentinel uint64_t(-1) means "flag not seen".
  uint64_t PAuthABIPlatform = -1;
  uint64_t PAuthABIVersion = -1;
  // NOTE(review): extraction appears to have dropped the right-hand sides of
  // the two initializers below (presumably mdconst extractions of operand 2)
  // — verify against the upstream file.
  for (const MDNode *MDN : Flags->operands()) {
    visitModuleFlag(MDN, SeenIDs, Requirements);
    if (MDN->getNumOperands() != 3)
      continue;
    if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
      if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
        if (const auto *PAP =
          PAuthABIPlatform = PAP->getZExtValue();
      } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
        if (const auto *PAV =
          PAuthABIVersion = PAV->getZExtValue();
      }
    }
  }

  // Either both pauthabi flags are present or neither is.
  if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
    CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
                "'aarch64-elf-pauthabi-version' module flags must be present");

  // Validate that the requirements in the module are valid.
  for (const MDNode *Requirement : Requirements) {
    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
    const Metadata *ReqValue = Requirement->getOperand(1);

    // The required flag must exist in this module...
    const MDNode *Op = SeenIDs.lookup(Flag);
    if (!Op) {
      CheckFailed("invalid requirement on flag, flag is not present in module",
                  Flag);
      continue;
    }

    // ...and must carry exactly the required value.
    if (Op->getOperand(2) != ReqValue) {
      CheckFailed(("invalid requirement on flag, "
                   "flag does not have the required value"),
                  Flag);
      continue;
    }
  }
}
1856
/// Verify a single !llvm.module.flags entry: a (behavior, ID, value) triple.
/// Records the ID in \p SeenIDs for uniqueness checking and appends any
/// 'require' values to \p Requirements for later validation.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Check(Op->getNumOperands() == 3,
        "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  // NOTE(review): extraction appears to have dropped a line at the head of
  // this if-body (likely the first Check(...) call) — verify upstream.
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Check(false,
          "invalid behavior operand in module flag (unexpected constant)",
          Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Check(ID, "invalid ID operand in module flag (expected metadata string)",
        Op->getOperand(1));

  // Check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Min: {
    // 'min' merging only makes sense for non-negative constant integers.
    auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
    Check(V && V->getValue().isNonNegative(),
          "invalid value for 'min' module flag (expected constant non-negative "
          "integer)",
          Op->getOperand(2));
    break;
  }

  case Module::Max: {
      "invalid value for 'max' module flag (expected constant integer)",
        Op->getOperand(2));
    break;
  }

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Check(Value && Value->getNumOperands() == 2,
          "invalid value for 'require' module flag (expected metadata pair)",
          Op->getOperand(2));
    Check(isa<MDString>(Value->getOperand(0)),
          ("invalid value for 'require' module flag "
           "(first value operand should be a string)"),
          Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Check(isa<MDNode>(Op->getOperand(2)),
          "invalid value for 'append'-type module flag "
          "(expected a metadata node)",
          Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Check(Inserted,
          "module flag identifiers must be unique (or of 'require' type)", ID);
  }

  // NOTE(review): the initializer of 'Value' below appears truncated by
  // extraction — verify upstream.
  if (ID->getString() == "wchar_size") {
    ConstantInt *Value
    Check(Value, "wchar_size metadata requires constant integer argument");
  }

  if (ID->getString() == "Linker Options") {
    // If the llvm.linker.options named metadata exists, we assume that the
    // bitcode reader has upgraded the module flag. Otherwise the flag might
    // have been created by a client directly.
    Check(M.getNamedMetadata("llvm.linker.options"),
          "'Linker Options' named metadata no longer supported");
  }

  if (ID->getString() == "SemanticInterposition") {
    ConstantInt *Value =
    Check(Value,
          "SemanticInterposition metadata requires constant integer argument");
  }

  if (ID->getString() == "CG Profile") {
    // Each operand of the "CG Profile" node is a (caller, callee, count)
    // triple, checked by visitModuleFlagCGProfileEntry.
    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
      visitModuleFlagCGProfileEntry(MDO);
  }
}
1964
1965void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1966 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1967 if (!FuncMDO)
1968 return;
1969 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1970 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1971 "expected a Function or null", FuncMDO);
1972 };
1973 auto Node = dyn_cast_or_null<MDNode>(MDO);
1974 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1975 CheckFunction(Node->getOperand(0));
1976 CheckFunction(Node->getOperand(1));
1977 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1978 Check(Count && Count->getType()->isIntegerTy(),
1979 "expected an integer constant", Node->getOperand(2));
1980}
1981
/// Check each attribute in \p Attrs in isolation: known string ("strbool")
/// attributes must have an empty, "true", or "false" value, and enum/int
/// attributes must carry an argument exactly when their kind requires one.
/// \p V is only used for diagnostics.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// The x-macro include below expands one ATTRIBUTE_STRBOOL per known
// boolean-valued string attribute, generating the value check inline.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Int-kind attributes must have an argument; non-int kinds must not.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2007
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Checks applicability of each attribute, mutual-exclusion rules,
// type-compatibility, and per-attribute payload constraints (alignment,
// byval/byref/inalloca/preallocated sizes, initializes, nofpclass, range).
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' may only be combined with 'range'; the subtraction discounts a
  // present 'range' attribute from the count.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject attributes that are statically incompatible with the value's type
  // (e.g. pointer-only attributes on a non-pointer).
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V);
      return;
    }
  }

  if (isa<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
      // NOTE(review): the Check(...) call head for the diagnostic below
      // appears to have been dropped by extraction — verify upstream.
            "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  if (Attrs.hasAttribute(Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
    // NOTE(review): the ordering-check call head appears to have been
    // dropped by extraction — verify upstream.
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  if (Attrs.hasAttribute(Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    // Only bits inside fcAllFlags are meaningful test bits.
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  if (Attrs.hasAttribute(Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
    // NOTE(review): the bit-width comparison call head appears to have been
    // dropped by extraction — verify upstream.
        "Range bit width must match type bit width!", V);
  }
}
2174
2175void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2176 const Value *V) {
2177 if (Attrs.hasFnAttr(Attr)) {
2178 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2179 unsigned N;
2180 if (S.getAsInteger(10, N))
2181 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2182 }
2183}
2184
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// Verifies return/parameter/function attribute applicability, per-parameter
// uniqueness rules (nest/returned/sret/swift*/inalloca), function-level
// mutual-exclusion rules, and the payloads of attributes such as allocsize,
// allockind, vscale_range, and various string attributes.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Only validate each distinct attribute list once per module; the raw
  // pointer identifies the uniqued list.
  if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!", &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!", &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!", &A, V);
      }
    }
  }

  // Attributes that may appear on at most one parameter.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values",
          V);

  // Track the widest fixed-vector parameter/return type seen.
  // NOTE(review): MaxParameterWidth is not consumed within this visible
  // chunk — presumably read by a check dropped by extraction; confirm
  // against upstream.
  unsigned MaxParameterWidth = 0;
  auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
    if (Ty->isVectorTy()) {
      if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
        unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
        if (Size > MaxParameterWidth)
          MaxParameterWidth = Size;
      }
    }
  };
  GetMaxParameterWidth(FT->getReturnType());
  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(i);

    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics", V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm.",
              V);
    }

    verifyParameterAttrs(ArgAttrs, Ty, V);
    GetMaxParameterWidth(Ty);

    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!", V);
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute",
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!", V);
    }
  }

  if (!Attrs.hasFnAttrs())
    return;

  // Verify function-level attributes.
  verifyAttributeTypes(Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!",
          V);

  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
          "Attributes 'optdebug and optnone' are incompatible!", V);
  }

  Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
          Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
        "Attributes "
        "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
        V);

  if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optdebug' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optdebug' are incompatible!", V);
  }

  // 'writable' implies the function may write through the pointer, so the
  // memory effects must allow writes to argument memory.
  Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
            isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
        "Attribute writable and memory without argmem: write are incompatible!",
        V);

  if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
    Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
          "Attributes 'aarch64_pstate_sm_enabled and "
          "aarch64_pstate_sm_compatible' are incompatible!",
          V);
  }

  // At most one SME ZA-state attribute may be present.
  Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
         Attrs.hasFnAttr("aarch64_inout_za") +
         Attrs.hasFnAttr("aarch64_out_za") +
         Attrs.hasFnAttr("aarch64_preserves_za") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
        "'aarch64_inout_za', 'aarch64_preserves_za' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  // Likewise for the ZT0 state attributes.
  Check((Attrs.hasFnAttr("aarch64_new_zt0") +
         Attrs.hasFnAttr("aarch64_in_zt0") +
         Attrs.hasFnAttr("aarch64_inout_zt0") +
         Attrs.hasFnAttr("aarch64_out_zt0") +
         Attrs.hasFnAttr("aarch64_preserves_zt0") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
        "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  // NOTE(review): the Check(...) call head for the jumptable diagnostic
  // below appears to have been dropped by extraction — verify upstream.
  if (Attrs.hasFnAttr(Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
          "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
    // Both allocsize indices must name integer-typed parameters in range.
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                        " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args->first))
      return;

    if (Args->second && !CheckParam("number of elements", *Args->second))
      return;
  }

  // NOTE(review): the declaration line introducing 'Type' below appears to
  // have been dropped by extraction — verify upstream.
  if (Attrs.hasFnAttr(Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
    if (!is_contained(
            {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
            Type))
      CheckFailed(
          "'allockind()' requires exactly one of alloc, realloc, and free");
    if ((Type == AllocFnKind::Free) &&
        ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
               AllocFnKind::Aligned)) != AllocFnKind::Unknown))
      CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers.");
    AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed("'allockind()' can't be both zeroed and uninitialized");
  }

  if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
    StringRef S = A.getValueAsString();
    Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
    // The named variant, when it exists in this module, must match in
    // alloc-family, be allockind("zeroed"), and share the signature.
    Function *Variant = M.getFunction(S);
    if (Variant) {
      Attribute Family = Attrs.getFnAttr("alloc-family");
      Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
      if (Family.isValid())
        Check(VariantFamily.isValid() &&
                  VariantFamily.getValueAsString() == Family.getValueAsString(),
              "'alloc-variant-zeroed' must name a function belonging to the "
              "same 'alloc-family'");

      Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
                (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
                 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
            "'alloc-variant-zeroed' must name a function with "
            "'allockind(\"zeroed\")'");

      Check(FT == Variant->getFunctionType(),
            "'alloc-variant-zeroed' must name a function with the same "
            "signature");
    }
  }

  if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed("'vscale_range' minimum must be greater than 0", V);
    else if (!isPowerOf2_32(VScaleMin))
      CheckFailed("'vscale_range' minimum must be power-of-two value", V);
    std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
    else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
      CheckFailed("'vscale_range' maximum must be power-of-two value", V);
  }

  if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
    StringRef FP = FPAttr.getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
  }

  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
  if (Attrs.hasFnAttr("patchable-function-entry-section"))
    Check(!Attrs.getFnAttr("patchable-function-entry-section")
               .getValueAsString()
               .empty(),
          "\"patchable-function-entry-section\" must not be empty");
  checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);

  if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "none" && S != "all" && S != "non-leaf")
      CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "a_key" && S != "b_key")
      CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
                  V);
    // The key is meaningless without the base attribute.
    if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
      CheckFailed(
          "'sign-return-address-key' present without `sign-return-address`");
    }
  }

  if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-target-enforcement' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
                  V);
  }

  if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
    StringRef S = A.getValueAsString();
    const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
    if (!Info)
      CheckFailed("invalid name for a VFABI variant: " + S, V);
  }

  // NOTE(review): the guarding if-conditions for the two denormal-fp-math
  // diagnostics below appear to have been dropped by extraction — verify
  // upstream.
  if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
    StringRef S = A.getValueAsString();
      CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
    StringRef S = A.getValueAsString();
      CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
                  V);
  }
}
2530void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2531 Check(MD->getNumOperands() == 2,
2532 "'unknown' !prof should have a single additional operand", MD);
2533 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2534 Check(PassName != nullptr,
2535 "'unknown' !prof should have an additional operand of type "
2536 "string");
2537 Check(!PassName->getString().empty(),
2538 "the 'unknown' !prof operand should not be an empty string");
2539}
2540
/// Verify function-level metadata attachments: entry-count !prof nodes and
/// !kcfi_type nodes. \p MDs is the (kind-id, node) list attached to the
/// function.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): the argument to equalsStr below appears to have been
      // dropped by extraction — verify upstream.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      // NOTE(review): several Check(...) call heads in this region appear to
      // have been dropped by extraction — verify upstream.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2590
/// Walk the constant-expression DAG rooted at \p EntryC without recursion,
/// visiting each constant at most once (memoized in ConstantExprVisited).
/// Global values are boundary nodes: they are visited elsewhere, but must
/// belong to this module.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  // NOTE(review): the declaration of the 'Stack' worklist appears to have
  // been dropped by extraction here — verify upstream.
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      // Skip operands already queued or processed.
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2627
2628void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2629 if (CE->getOpcode() == Instruction::BitCast)
2630 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2631 CE->getType()),
2632 "Invalid bitcast", CE);
2633 else if (CE->getOpcode() == Instruction::PtrToAddr)
2634 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2635}
2636
2637void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2638 Check(CPA->getPointer()->getType()->isPointerTy(),
2639 "signed ptrauth constant base pointer must have pointer type");
2640
2641 Check(CPA->getType() == CPA->getPointer()->getType(),
2642 "signed ptrauth constant must have same type as its base pointer");
2643
2644 Check(CPA->getKey()->getBitWidth() == 32,
2645 "signed ptrauth constant key must be i32 constant integer");
2646
2648 "signed ptrauth constant address discriminator must be a pointer");
2649
2650 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2651 "signed ptrauth constant discriminator must be i64 constant integer");
2652}
2653
2654bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2655 // There shouldn't be more attribute sets than there are parameters plus the
2656 // function and return value.
2657 return Attrs.getNumAttrSets() <= Params + 2;
2658}
2659
2660void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2661 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2662 unsigned ArgNo = 0;
2663 unsigned LabelNo = 0;
2664 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2665 if (CI.Type == InlineAsm::isLabel) {
2666 ++LabelNo;
2667 continue;
2668 }
2669
2670 // Only deal with constraints that correspond to call arguments.
2671 if (!CI.hasArg())
2672 continue;
2673
2674 if (CI.isIndirect) {
2675 const Value *Arg = Call.getArgOperand(ArgNo);
2676 Check(Arg->getType()->isPointerTy(),
2677 "Operand for indirect constraint must have pointer type", &Call);
2678
2680 "Operand for indirect constraint must have elementtype attribute",
2681 &Call);
2682 } else {
2683 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2684 "Elementtype attribute can only be applied for indirect "
2685 "constraints",
2686 &Call);
2687 }
2688
2689 ArgNo++;
2690 }
2691
2692 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2693 Check(LabelNo == CallBr->getNumIndirectDests(),
2694 "Number of label constraints does not match number of callbr dests",
2695 &Call);
2696 } else {
2697 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2698 &Call);
2699 }
2700}
2701
2702/// Verify that statepoint intrinsic is well formed.
2703void Verifier::verifyStatepoint(const CallBase &Call) {
2704 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2705
2708 "gc.statepoint must read and write all memory to preserve "
2709 "reordering restrictions required by safepoint semantics",
2710 Call);
2711
2712 const int64_t NumPatchBytes =
2713 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2714 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2715 Check(NumPatchBytes >= 0,
2716 "gc.statepoint number of patchable bytes must be "
2717 "positive",
2718 Call);
2719
2720 Type *TargetElemType = Call.getParamElementType(2);
2721 Check(TargetElemType,
2722 "gc.statepoint callee argument must have elementtype attribute", Call);
2723 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2724 Check(TargetFuncType,
2725 "gc.statepoint callee elementtype must be function type", Call);
2726
2727 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2728 Check(NumCallArgs >= 0,
2729 "gc.statepoint number of arguments to underlying call "
2730 "must be positive",
2731 Call);
2732 const int NumParams = (int)TargetFuncType->getNumParams();
2733 if (TargetFuncType->isVarArg()) {
2734 Check(NumCallArgs >= NumParams,
2735 "gc.statepoint mismatch in number of vararg call args", Call);
2736
2737 // TODO: Remove this limitation
2738 Check(TargetFuncType->getReturnType()->isVoidTy(),
2739 "gc.statepoint doesn't support wrapping non-void "
2740 "vararg functions yet",
2741 Call);
2742 } else
2743 Check(NumCallArgs == NumParams,
2744 "gc.statepoint mismatch in number of call args", Call);
2745
2746 const uint64_t Flags
2747 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2748 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2749 "unknown flag used in gc.statepoint flags argument", Call);
2750
2751 // Verify that the types of the call parameter arguments match
2752 // the type of the wrapped callee.
2753 AttributeList Attrs = Call.getAttributes();
2754 for (int i = 0; i < NumParams; i++) {
2755 Type *ParamType = TargetFuncType->getParamType(i);
2756 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2757 Check(ArgType == ParamType,
2758 "gc.statepoint call argument does not match wrapped "
2759 "function type",
2760 Call);
2761
2762 if (TargetFuncType->isVarArg()) {
2763 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2764 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2765 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2766 }
2767 }
2768
2769 const int EndCallArgsInx = 4 + NumCallArgs;
2770
2771 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2772 Check(isa<ConstantInt>(NumTransitionArgsV),
2773 "gc.statepoint number of transition arguments "
2774 "must be constant integer",
2775 Call);
2776 const int NumTransitionArgs =
2777 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2778 Check(NumTransitionArgs == 0,
2779 "gc.statepoint w/inline transition bundle is deprecated", Call);
2780 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2781
2782 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2783 Check(isa<ConstantInt>(NumDeoptArgsV),
2784 "gc.statepoint number of deoptimization arguments "
2785 "must be constant integer",
2786 Call);
2787 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2788 Check(NumDeoptArgs == 0,
2789 "gc.statepoint w/inline deopt operands is deprecated", Call);
2790
2791 const int ExpectedNumArgs = 7 + NumCallArgs;
2792 Check(ExpectedNumArgs == (int)Call.arg_size(),
2793 "gc.statepoint too many arguments", Call);
2794
2795 // Check that the only uses of this gc.statepoint are gc.result or
2796 // gc.relocate calls which are tied to this statepoint and thus part
2797 // of the same statepoint sequence
2798 for (const User *U : Call.users()) {
2799 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2800 Check(UserCall, "illegal use of statepoint token", Call, U);
2801 if (!UserCall)
2802 continue;
2803 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2804 "gc.result or gc.relocate are the only value uses "
2805 "of a gc.statepoint",
2806 Call, U);
2807 if (isa<GCResultInst>(UserCall)) {
2808 Check(UserCall->getArgOperand(0) == &Call,
2809 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2810 } else if (isa<GCRelocateInst>(Call)) {
2811 Check(UserCall->getArgOperand(0) == &Call,
2812 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2813 }
2814 }
2815
2816 // Note: It is legal for a single derived pointer to be listed multiple
2817 // times. It's non-optimal, but it is legal. It can also happen after
2818 // insertion if we strip a bitcast away.
2819 // Note: It is really tempting to check that each base is relocated and
2820 // that a derived pointer is never reused as a base pointer. This turns
2821 // out to be problematic since optimizations run after safepoint insertion
2822 // can recognize equality properties that the insertion logic doesn't know
2823 // about. See example statepoint.ll in the verifier subdirectory
2824}
2825
2826void Verifier::verifyFrameRecoverIndices() {
2827 for (auto &Counts : FrameEscapeInfo) {
2828 Function *F = Counts.first;
2829 unsigned EscapedObjectCount = Counts.second.first;
2830 unsigned MaxRecoveredIndex = Counts.second.second;
2831 Check(MaxRecoveredIndex <= EscapedObjectCount,
2832 "all indices passed to llvm.localrecover must be less than the "
2833 "number of arguments passed to llvm.localescape in the parent "
2834 "function",
2835 F);
2836 }
2837}
2838
2839static Instruction *getSuccPad(Instruction *Terminator) {
2840 BasicBlock *UnwindDest;
2841 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2842 UnwindDest = II->getUnwindDest();
2843 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2844 UnwindDest = CSI->getUnwindDest();
2845 else
2846 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2847 return &*UnwindDest->getFirstNonPHIIt();
2848}
2849
// Verify that no group of sibling EH funclets unwind to each other in a
// cycle: each pad's unwind edge is followed transitively, and any pad that
// is reached again while still on the current walk is reported as an error.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose unwind chains have already been fully explored.
  // Active: pads on the chain currently being walked (cycle detection).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        // Collect the pads (and their unwinding terminators) that form the
        // cycle so the diagnostic can show the whole loop.
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2892
2893// visitFunction - Verify that a function is ok.
2894//
2895void Verifier::visitFunction(const Function &F) {
2896 visitGlobalValue(F);
2897
2898 // Check function arguments.
2899 FunctionType *FT = F.getFunctionType();
2900 unsigned NumArgs = F.arg_size();
2901
2902 Check(&Context == &F.getContext(),
2903 "Function context does not match Module context!", &F);
2904
2905 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2906 Check(FT->getNumParams() == NumArgs,
2907 "# formal arguments must match # of arguments for function type!", &F,
2908 FT);
2909 Check(F.getReturnType()->isFirstClassType() ||
2910 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2911 "Functions cannot return aggregate values!", &F);
2912
2913 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2914 "Invalid struct return type!", &F);
2915
2916 if (MaybeAlign A = F.getAlign()) {
2917 Check(A->value() <= Value::MaximumAlignment,
2918 "huge alignment values are unsupported", &F);
2919 }
2920
2921 AttributeList Attrs = F.getAttributes();
2922
2923 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2924 "Attribute after last parameter!", &F);
2925
2926 bool IsIntrinsic = F.isIntrinsic();
2927
2928 // Check function attributes.
2929 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2930
2931 // On function declarations/definitions, we do not support the builtin
2932 // attribute. We do not check this in VerifyFunctionAttrs since that is
2933 // checking for Attributes that can/can not ever be on functions.
2934 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2935 "Attribute 'builtin' can only be applied to a callsite.", &F);
2936
2937 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2938 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2939
2940 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2941 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2942
2943 if (Attrs.hasFnAttr(Attribute::Naked))
2944 for (const Argument &Arg : F.args())
2945 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2946
2947 // Check that this function meets the restrictions on this calling convention.
2948 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2949 // restrictions can be lifted.
2950 switch (F.getCallingConv()) {
2951 default:
2952 case CallingConv::C:
2953 break;
2954 case CallingConv::X86_INTR: {
2955 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2956 "Calling convention parameter requires byval", &F);
2957 break;
2958 }
2959 case CallingConv::AMDGPU_KERNEL:
2960 case CallingConv::SPIR_KERNEL:
2961 case CallingConv::AMDGPU_CS_Chain:
2962 case CallingConv::AMDGPU_CS_ChainPreserve:
2963 Check(F.getReturnType()->isVoidTy(),
2964 "Calling convention requires void return type", &F);
2965 [[fallthrough]];
2966 case CallingConv::AMDGPU_VS:
2967 case CallingConv::AMDGPU_HS:
2968 case CallingConv::AMDGPU_GS:
2969 case CallingConv::AMDGPU_PS:
2970 case CallingConv::AMDGPU_CS:
2971 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2972 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2973 const unsigned StackAS = DL.getAllocaAddrSpace();
2974 unsigned i = 0;
2975 for (const Argument &Arg : F.args()) {
2976 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2977 "Calling convention disallows byval", &F);
2978 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2979 "Calling convention disallows preallocated", &F);
2980 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2981 "Calling convention disallows inalloca", &F);
2982
2983 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2984 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2985 // value here.
2986 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2987 "Calling convention disallows stack byref", &F);
2988 }
2989
2990 ++i;
2991 }
2992 }
2993
2994 [[fallthrough]];
2995 case CallingConv::Fast:
2996 case CallingConv::Cold:
2997 case CallingConv::Intel_OCL_BI:
2998 case CallingConv::PTX_Kernel:
2999 case CallingConv::PTX_Device:
3000 Check(!F.isVarArg(),
3001 "Calling convention does not support varargs or "
3002 "perfect forwarding!",
3003 &F);
3004 break;
3005 case CallingConv::AMDGPU_Gfx_WholeWave:
3006 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3007 "Calling convention requires first argument to be i1", &F);
3008 Check(!F.arg_begin()->hasInRegAttr(),
3009 "Calling convention requires first argument to not be inreg", &F);
3010 Check(!F.isVarArg(),
3011 "Calling convention does not support varargs or "
3012 "perfect forwarding!",
3013 &F);
3014 break;
3015 }
3016
3017 // Check that the argument values match the function type for this function...
3018 unsigned i = 0;
3019 for (const Argument &Arg : F.args()) {
3020 Check(Arg.getType() == FT->getParamType(i),
3021 "Argument value does not match function argument type!", &Arg,
3022 FT->getParamType(i));
3023 Check(Arg.getType()->isFirstClassType(),
3024 "Function arguments must have first-class types!", &Arg);
3025 if (!IsIntrinsic) {
3026 Check(!Arg.getType()->isMetadataTy(),
3027 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3028 Check(!Arg.getType()->isTokenLikeTy(),
3029 "Function takes token but isn't an intrinsic", &Arg, &F);
3030 Check(!Arg.getType()->isX86_AMXTy(),
3031 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3032 }
3033
3034 // Check that swifterror argument is only used by loads and stores.
3035 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3036 verifySwiftErrorValue(&Arg);
3037 }
3038 ++i;
3039 }
3040
3041 if (!IsIntrinsic) {
3042 Check(!F.getReturnType()->isTokenLikeTy(),
3043 "Function returns a token but isn't an intrinsic", &F);
3044 Check(!F.getReturnType()->isX86_AMXTy(),
3045 "Function returns a x86_amx but isn't an intrinsic", &F);
3046 }
3047
3048 // Get the function metadata attachments.
3050 F.getAllMetadata(MDs);
3051 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3052 verifyFunctionMetadata(MDs);
3053
3054 // Check validity of the personality function
3055 if (F.hasPersonalityFn()) {
3056 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3057 if (Per)
3058 Check(Per->getParent() == F.getParent(),
3059 "Referencing personality function in another module!", &F,
3060 F.getParent(), Per, Per->getParent());
3061 }
3062
3063 // EH funclet coloring can be expensive, recompute on-demand
3064 BlockEHFuncletColors.clear();
3065
3066 if (F.isMaterializable()) {
3067 // Function has a body somewhere we can't see.
3068 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3069 MDs.empty() ? nullptr : MDs.front().second);
3070 } else if (F.isDeclaration()) {
3071 for (const auto &I : MDs) {
3072 // This is used for call site debug information.
3073 CheckDI(I.first != LLVMContext::MD_dbg ||
3074 !cast<DISubprogram>(I.second)->isDistinct(),
3075 "function declaration may only have a unique !dbg attachment",
3076 &F);
3077 Check(I.first != LLVMContext::MD_prof,
3078 "function declaration may not have a !prof attachment", &F);
3079
3080 // Verify the metadata itself.
3081 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3082 }
3083 Check(!F.hasPersonalityFn(),
3084 "Function declaration shouldn't have a personality routine", &F);
3085 } else {
3086 // Verify that this function (which has a body) is not named "llvm.*". It
3087 // is not legal to define intrinsics.
3088 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3089
3090 // Check the entry node
3091 const BasicBlock *Entry = &F.getEntryBlock();
3092 Check(pred_empty(Entry),
3093 "Entry block to function must not have predecessors!", Entry);
3094
3095 // The address of the entry block cannot be taken, unless it is dead.
3096 if (Entry->hasAddressTaken()) {
3097 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3098 "blockaddress may not be used with the entry block!", Entry);
3099 }
3100
3101 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3102 NumKCFIAttachments = 0;
3103 // Visit metadata attachments.
3104 for (const auto &I : MDs) {
3105 // Verify that the attachment is legal.
3106 auto AllowLocs = AreDebugLocsAllowed::No;
3107 switch (I.first) {
3108 default:
3109 break;
3110 case LLVMContext::MD_dbg: {
3111 ++NumDebugAttachments;
3112 CheckDI(NumDebugAttachments == 1,
3113 "function must have a single !dbg attachment", &F, I.second);
3114 CheckDI(isa<DISubprogram>(I.second),
3115 "function !dbg attachment must be a subprogram", &F, I.second);
3116 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3117 "function definition may only have a distinct !dbg attachment",
3118 &F);
3119
3120 auto *SP = cast<DISubprogram>(I.second);
3121 const Function *&AttachedTo = DISubprogramAttachments[SP];
3122 CheckDI(!AttachedTo || AttachedTo == &F,
3123 "DISubprogram attached to more than one function", SP, &F);
3124 AttachedTo = &F;
3125 AllowLocs = AreDebugLocsAllowed::Yes;
3126 break;
3127 }
3128 case LLVMContext::MD_prof:
3129 ++NumProfAttachments;
3130 Check(NumProfAttachments == 1,
3131 "function must have a single !prof attachment", &F, I.second);
3132 break;
3133 case LLVMContext::MD_kcfi_type:
3134 ++NumKCFIAttachments;
3135 Check(NumKCFIAttachments == 1,
3136 "function must have a single !kcfi_type attachment", &F,
3137 I.second);
3138 break;
3139 }
3140
3141 // Verify the metadata itself.
3142 visitMDNode(*I.second, AllowLocs);
3143 }
3144 }
3145
3146 // If this function is actually an intrinsic, verify that it is only used in
3147 // direct call/invokes, never having its "address taken".
3148 // Only do this if the module is materialized, otherwise we don't have all the
3149 // uses.
3150 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3151 const User *U;
3152 if (F.hasAddressTaken(&U, false, true, false,
3153 /*IgnoreARCAttachedCall=*/true))
3154 Check(false, "Invalid user of intrinsic instruction!", U);
3155 }
3156
3157 // Check intrinsics' signatures.
3158 switch (F.getIntrinsicID()) {
3159 case Intrinsic::experimental_gc_get_pointer_base: {
3160 FunctionType *FT = F.getFunctionType();
3161 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3162 Check(isa<PointerType>(F.getReturnType()),
3163 "gc.get.pointer.base must return a pointer", F);
3164 Check(FT->getParamType(0) == F.getReturnType(),
3165 "gc.get.pointer.base operand and result must be of the same type", F);
3166 break;
3167 }
3168 case Intrinsic::experimental_gc_get_pointer_offset: {
3169 FunctionType *FT = F.getFunctionType();
3170 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3171 Check(isa<PointerType>(FT->getParamType(0)),
3172 "gc.get.pointer.offset operand must be a pointer", F);
3173 Check(F.getReturnType()->isIntegerTy(),
3174 "gc.get.pointer.offset must return integer", F);
3175 break;
3176 }
3177 }
3178
3179 auto *N = F.getSubprogram();
3180 HasDebugInfo = (N != nullptr);
3181 if (!HasDebugInfo)
3182 return;
3183
3184 // Check that all !dbg attachments lead to back to N.
3185 //
3186 // FIXME: Check this incrementally while visiting !dbg attachments.
3187 // FIXME: Only check when N is the canonical subprogram for F.
3188 SmallPtrSet<const MDNode *, 32> Seen;
3189 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3190 // Be careful about using DILocation here since we might be dealing with
3191 // broken code (this is the Verifier after all).
3192 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3193 if (!DL)
3194 return;
3195 if (!Seen.insert(DL).second)
3196 return;
3197
3198 Metadata *Parent = DL->getRawScope();
3199 CheckDI(Parent && isa<DILocalScope>(Parent),
3200 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3201
3202 DILocalScope *Scope = DL->getInlinedAtScope();
3203 Check(Scope, "Failed to find DILocalScope", DL);
3204
3205 if (!Seen.insert(Scope).second)
3206 return;
3207
3208 DISubprogram *SP = Scope->getSubprogram();
3209
3210 // Scope and SP could be the same MDNode and we don't want to skip
3211 // validation in that case
3212 if ((Scope != SP) && !Seen.insert(SP).second)
3213 return;
3214
3215 CheckDI(SP->describes(&F),
3216 "!dbg attachment points at wrong subprogram for function", N, &F,
3217 &I, DL, Scope, SP);
3218 };
3219 for (auto &BB : F)
3220 for (auto &I : BB) {
3221 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3222 // The llvm.loop annotations also contain two DILocations.
3223 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3224 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3225 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3226 if (BrokenDebugInfo)
3227 return;
3228 }
3229}
3230
3231// verifyBasicBlock - Verify that a basic block is well formed...
3232//
3233void Verifier::visitBasicBlock(BasicBlock &BB) {
3234 InstsInThisBlock.clear();
3235 ConvergenceVerifyHelper.visit(BB);
3236
3237 // Ensure that basic blocks have terminators!
3238 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3239
3240 // Check constraints that this basic block imposes on all of the PHI nodes in
3241 // it.
3242 if (isa<PHINode>(BB.front())) {
3243 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3245 llvm::sort(Preds);
3246 for (const PHINode &PN : BB.phis()) {
3247 Check(PN.getNumIncomingValues() == Preds.size(),
3248 "PHINode should have one entry for each predecessor of its "
3249 "parent basic block!",
3250 &PN);
3251
3252 // Get and sort all incoming values in the PHI node...
3253 Values.clear();
3254 Values.reserve(PN.getNumIncomingValues());
3255 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3256 Values.push_back(
3257 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3258 llvm::sort(Values);
3259
3260 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3261 // Check to make sure that if there is more than one entry for a
3262 // particular basic block in this PHI node, that the incoming values are
3263 // all identical.
3264 //
3265 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3266 Values[i].second == Values[i - 1].second,
3267 "PHI node has multiple entries for the same basic block with "
3268 "different incoming values!",
3269 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3270
3271 // Check to make sure that the predecessors and PHI node entries are
3272 // matched up.
3273 Check(Values[i].first == Preds[i],
3274 "PHI node entries do not match predecessors!", &PN,
3275 Values[i].first, Preds[i]);
3276 }
3277 }
3278 }
3279
3280 // Check that all instructions have their parent pointers set up correctly.
3281 for (auto &I : BB)
3282 {
3283 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3284 }
3285
3286 // Confirm that no issues arise from the debug program.
3287 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3288 &BB);
3289}
3290
3291void Verifier::visitTerminator(Instruction &I) {
3292 // Ensure that terminators only exist at the end of the basic block.
3293 Check(&I == I.getParent()->getTerminator(),
3294 "Terminator found in the middle of a basic block!", I.getParent());
3295 visitInstruction(I);
3296}
3297
3298void Verifier::visitBranchInst(BranchInst &BI) {
3299 if (BI.isConditional()) {
3301 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3302 }
3303 visitTerminator(BI);
3304}
3305
3306void Verifier::visitReturnInst(ReturnInst &RI) {
3307 Function *F = RI.getParent()->getParent();
3308 unsigned N = RI.getNumOperands();
3309 if (F->getReturnType()->isVoidTy())
3310 Check(N == 0,
3311 "Found return instr that returns non-void in Function of void "
3312 "return type!",
3313 &RI, F->getReturnType());
3314 else
3315 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3316 "Function return type does not match operand "
3317 "type of return inst!",
3318 &RI, F->getReturnType());
3319
3320 // Check to make sure that the return value has necessary properties for
3321 // terminators...
3322 visitTerminator(RI);
3323}
3324
3325void Verifier::visitSwitchInst(SwitchInst &SI) {
3326 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3327 // Check to make sure that all of the constants in the switch instruction
3328 // have the same type as the switched-on value.
3329 Type *SwitchTy = SI.getCondition()->getType();
3330 SmallPtrSet<ConstantInt*, 32> Constants;
3331 for (auto &Case : SI.cases()) {
3332 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3333 "Case value is not a constant integer.", &SI);
3334 Check(Case.getCaseValue()->getType() == SwitchTy,
3335 "Switch constants must all be same type as switch value!", &SI);
3336 Check(Constants.insert(Case.getCaseValue()).second,
3337 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3338 }
3339
3340 visitTerminator(SI);
3341}
3342
3343void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3345 "Indirectbr operand must have pointer type!", &BI);
3346 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3348 "Indirectbr destinations must all have pointer type!", &BI);
3349
3350 visitTerminator(BI);
3351}
3352
3353void Verifier::visitCallBrInst(CallBrInst &CBI) {
3354 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3355 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3356 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3357
3358 verifyInlineAsmCall(CBI);
3359 visitTerminator(CBI);
3360}
3361
3362void Verifier::visitSelectInst(SelectInst &SI) {
3363 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3364 SI.getOperand(2)),
3365 "Invalid operands for select instruction!", &SI);
3366
3367 Check(SI.getTrueValue()->getType() == SI.getType(),
3368 "Select values must have same type as select instruction!", &SI);
3369 visitInstruction(SI);
3370}
3371
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
/// Any occurrence in input IR is unconditionally diagnosed.
void Verifier::visitUserOp1(Instruction &I) {
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3378
3379void Verifier::visitTruncInst(TruncInst &I) {
3380 // Get the source and destination types
3381 Type *SrcTy = I.getOperand(0)->getType();
3382 Type *DestTy = I.getType();
3383
3384 // Get the size of the types in bits, we'll need this later
3385 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3386 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3387
3388 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3389 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3390 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3391 "trunc source and destination must both be a vector or neither", &I);
3392 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3393
3394 visitInstruction(I);
3395}
3396
3397void Verifier::visitZExtInst(ZExtInst &I) {
3398 // Get the source and destination types
3399 Type *SrcTy = I.getOperand(0)->getType();
3400 Type *DestTy = I.getType();
3401
3402 // Get the size of the types in bits, we'll need this later
3403 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3404 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3405 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3406 "zext source and destination must both be a vector or neither", &I);
3407 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3408 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3409
3410 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3411
3412 visitInstruction(I);
3413}
3414
3415void Verifier::visitSExtInst(SExtInst &I) {
3416 // Get the source and destination types
3417 Type *SrcTy = I.getOperand(0)->getType();
3418 Type *DestTy = I.getType();
3419
3420 // Get the size of the types in bits, we'll need this later
3421 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3422 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3423
3424 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3425 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3426 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3427 "sext source and destination must both be a vector or neither", &I);
3428 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3429
3430 visitInstruction(I);
3431}
3432
3433void Verifier::visitFPTruncInst(FPTruncInst &I) {
3434 // Get the source and destination types
3435 Type *SrcTy = I.getOperand(0)->getType();
3436 Type *DestTy = I.getType();
3437 // Get the size of the types in bits, we'll need this later
3438 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3439 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3440
3441 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3442 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3443 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3444 "fptrunc source and destination must both be a vector or neither", &I);
3445 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3446
3447 visitInstruction(I);
3448}
3449
3450void Verifier::visitFPExtInst(FPExtInst &I) {
3451 // Get the source and destination types
3452 Type *SrcTy = I.getOperand(0)->getType();
3453 Type *DestTy = I.getType();
3454
3455 // Get the size of the types in bits, we'll need this later
3456 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3457 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3458
3459 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3460 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3461 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3462 "fpext source and destination must both be a vector or neither", &I);
3463 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3464
3465 visitInstruction(I);
3466}
3467
3468void Verifier::visitUIToFPInst(UIToFPInst &I) {
3469 // Get the source and destination types
3470 Type *SrcTy = I.getOperand(0)->getType();
3471 Type *DestTy = I.getType();
3472
3473 bool SrcVec = SrcTy->isVectorTy();
3474 bool DstVec = DestTy->isVectorTy();
3475
3476 Check(SrcVec == DstVec,
3477 "UIToFP source and dest must both be vector or scalar", &I);
3478 Check(SrcTy->isIntOrIntVectorTy(),
3479 "UIToFP source must be integer or integer vector", &I);
3480 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3481 &I);
3482
3483 if (SrcVec && DstVec)
3484 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3485 cast<VectorType>(DestTy)->getElementCount(),
3486 "UIToFP source and dest vector length mismatch", &I);
3487
3488 visitInstruction(I);
3489}
3490
3491void Verifier::visitSIToFPInst(SIToFPInst &I) {
3492 // Get the source and destination types
3493 Type *SrcTy = I.getOperand(0)->getType();
3494 Type *DestTy = I.getType();
3495
3496 bool SrcVec = SrcTy->isVectorTy();
3497 bool DstVec = DestTy->isVectorTy();
3498
3499 Check(SrcVec == DstVec,
3500 "SIToFP source and dest must both be vector or scalar", &I);
3501 Check(SrcTy->isIntOrIntVectorTy(),
3502 "SIToFP source must be integer or integer vector", &I);
3503 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3504 &I);
3505
3506 if (SrcVec && DstVec)
3507 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3508 cast<VectorType>(DestTy)->getElementCount(),
3509 "SIToFP source and dest vector length mismatch", &I);
3510
3511 visitInstruction(I);
3512}
3513
3514void Verifier::visitFPToUIInst(FPToUIInst &I) {
3515 // Get the source and destination types
3516 Type *SrcTy = I.getOperand(0)->getType();
3517 Type *DestTy = I.getType();
3518
3519 bool SrcVec = SrcTy->isVectorTy();
3520 bool DstVec = DestTy->isVectorTy();
3521
3522 Check(SrcVec == DstVec,
3523 "FPToUI source and dest must both be vector or scalar", &I);
3524 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3525 Check(DestTy->isIntOrIntVectorTy(),
3526 "FPToUI result must be integer or integer vector", &I);
3527
3528 if (SrcVec && DstVec)
3529 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3530 cast<VectorType>(DestTy)->getElementCount(),
3531 "FPToUI source and dest vector length mismatch", &I);
3532
3533 visitInstruction(I);
3534}
3535
3536void Verifier::visitFPToSIInst(FPToSIInst &I) {
3537 // Get the source and destination types
3538 Type *SrcTy = I.getOperand(0)->getType();
3539 Type *DestTy = I.getType();
3540
3541 bool SrcVec = SrcTy->isVectorTy();
3542 bool DstVec = DestTy->isVectorTy();
3543
3544 Check(SrcVec == DstVec,
3545 "FPToSI source and dest must both be vector or scalar", &I);
3546 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3547 Check(DestTy->isIntOrIntVectorTy(),
3548 "FPToSI result must be integer or integer vector", &I);
3549
3550 if (SrcVec && DstVec)
3551 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3552 cast<VectorType>(DestTy)->getElementCount(),
3553 "FPToSI source and dest vector length mismatch", &I);
3554
3555 visitInstruction(I);
3556}
3557
3558void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3559 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3560 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3561 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3562 V);
3563
3564 if (SrcTy->isVectorTy()) {
3565 auto *VSrc = cast<VectorType>(SrcTy);
3566 auto *VDest = cast<VectorType>(DestTy);
3567 Check(VSrc->getElementCount() == VDest->getElementCount(),
3568 "PtrToAddr vector length mismatch", V);
3569 }
3570
3571 Type *AddrTy = DL.getAddressType(SrcTy);
3572 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3573}
3574
3575void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3576 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3577 visitInstruction(I);
3578}
3579
3580void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3581 // Get the source and destination types
3582 Type *SrcTy = I.getOperand(0)->getType();
3583 Type *DestTy = I.getType();
3584
3585 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3586
3587 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3588 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3589 &I);
3590
3591 if (SrcTy->isVectorTy()) {
3592 auto *VSrc = cast<VectorType>(SrcTy);
3593 auto *VDest = cast<VectorType>(DestTy);
3594 Check(VSrc->getElementCount() == VDest->getElementCount(),
3595 "PtrToInt Vector length mismatch", &I);
3596 }
3597
3598 visitInstruction(I);
3599}
3600
3601void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3602 // Get the source and destination types
3603 Type *SrcTy = I.getOperand(0)->getType();
3604 Type *DestTy = I.getType();
3605
3606 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3607 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3608
3609 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3610 &I);
3611 if (SrcTy->isVectorTy()) {
3612 auto *VSrc = cast<VectorType>(SrcTy);
3613 auto *VDest = cast<VectorType>(DestTy);
3614 Check(VSrc->getElementCount() == VDest->getElementCount(),
3615 "IntToPtr Vector length mismatch", &I);
3616 }
3617 visitInstruction(I);
3618}
3619
3620void Verifier::visitBitCastInst(BitCastInst &I) {
3621 Check(
3622 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3623 "Invalid bitcast", &I);
3624 visitInstruction(I);
3625}
3626
3627void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3628 Type *SrcTy = I.getOperand(0)->getType();
3629 Type *DestTy = I.getType();
3630
3631 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3632 &I);
3633 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3634 &I);
3636 "AddrSpaceCast must be between different address spaces", &I);
3637 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3638 Check(SrcVTy->getElementCount() ==
3639 cast<VectorType>(DestTy)->getElementCount(),
3640 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3641 visitInstruction(I);
3642}
3643
3644/// visitPHINode - Ensure that a PHI node is well formed.
3645///
3646void Verifier::visitPHINode(PHINode &PN) {
3647 // Ensure that the PHI nodes are all grouped together at the top of the block.
3648 // This can be tested by checking whether the instruction before this is
3649 // either nonexistent (because this is begin()) or is a PHI node. If not,
3650 // then there is some other instruction before a PHI.
3651 Check(&PN == &PN.getParent()->front() ||
3653 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3654
3655 // Check that a PHI doesn't yield a Token.
3656 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3657
3658 // Check that all of the values of the PHI node have the same type as the
3659 // result.
3660 for (Value *IncValue : PN.incoming_values()) {
3661 Check(PN.getType() == IncValue->getType(),
3662 "PHI node operands are not the same type as the result!", &PN);
3663 }
3664
3665 // All other PHI node constraints are checked in the visitBasicBlock method.
3666
3667 visitInstruction(PN);
3668}
3669
3670void Verifier::visitCallBase(CallBase &Call) {
3672 "Called function must be a pointer!", Call);
3673 FunctionType *FTy = Call.getFunctionType();
3674
3675 // Verify that the correct number of arguments are being passed
3676 if (FTy->isVarArg())
3677 Check(Call.arg_size() >= FTy->getNumParams(),
3678 "Called function requires more parameters than were provided!", Call);
3679 else
3680 Check(Call.arg_size() == FTy->getNumParams(),
3681 "Incorrect number of arguments passed to called function!", Call);
3682
3683 // Verify that all arguments to the call match the function type.
3684 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3685 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3686 "Call parameter type does not match function signature!",
3687 Call.getArgOperand(i), FTy->getParamType(i), Call);
3688
3689 AttributeList Attrs = Call.getAttributes();
3690
3691 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3692 "Attribute after last parameter!", Call);
3693
3694 Function *Callee =
3696 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3697 if (IsIntrinsic)
3698 Check(Callee->getValueType() == FTy,
3699 "Intrinsic called with incompatible signature", Call);
3700
3701 // Verify if the calling convention of the callee is callable.
3703 "calling convention does not permit calls", Call);
3704
3705 // Disallow passing/returning values with alignment higher than we can
3706 // represent.
3707 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3708 // necessary.
3709 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3710 if (!Ty->isSized())
3711 return;
3712 Align ABIAlign = DL.getABITypeAlign(Ty);
3713 Check(ABIAlign.value() <= Value::MaximumAlignment,
3714 "Incorrect alignment of " + Message + " to called function!", Call);
3715 };
3716
3717 if (!IsIntrinsic) {
3718 VerifyTypeAlign(FTy->getReturnType(), "return type");
3719 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3720 Type *Ty = FTy->getParamType(i);
3721 VerifyTypeAlign(Ty, "argument passed");
3722 }
3723 }
3724
3725 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3726 // Don't allow speculatable on call sites, unless the underlying function
3727 // declaration is also speculatable.
3728 Check(Callee && Callee->isSpeculatable(),
3729 "speculatable attribute may not apply to call sites", Call);
3730 }
3731
3732 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3733 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3734 "preallocated as a call site attribute can only be on "
3735 "llvm.call.preallocated.arg");
3736 }
3737
3738 // Verify call attributes.
3739 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3740
3741 // Conservatively check the inalloca argument.
3742 // We have a bug if we can find that there is an underlying alloca without
3743 // inalloca.
3744 if (Call.hasInAllocaArgument()) {
3745 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3746 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3747 Check(AI->isUsedWithInAlloca(),
3748 "inalloca argument for call has mismatched alloca", AI, Call);
3749 }
3750
3751 // For each argument of the callsite, if it has the swifterror argument,
3752 // make sure the underlying alloca/parameter it comes from has a swifterror as
3753 // well.
3754 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3755 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3756 Value *SwiftErrorArg = Call.getArgOperand(i);
3757 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3758 Check(AI->isSwiftError(),
3759 "swifterror argument for call has mismatched alloca", AI, Call);
3760 continue;
3761 }
3762 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3763 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3764 SwiftErrorArg, Call);
3765 Check(ArgI->hasSwiftErrorAttr(),
3766 "swifterror argument for call has mismatched parameter", ArgI,
3767 Call);
3768 }
3769
3770 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3771 // Don't allow immarg on call sites, unless the underlying declaration
3772 // also has the matching immarg.
3773 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3774 "immarg may not apply only to call sites", Call.getArgOperand(i),
3775 Call);
3776 }
3777
3778 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3779 Value *ArgVal = Call.getArgOperand(i);
3780 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3781 "immarg operand has non-immediate parameter", ArgVal, Call);
3782
3783 // If the imm-arg is an integer and also has a range attached,
3784 // check if the given value is within the range.
3785 if (Call.paramHasAttr(i, Attribute::Range)) {
3786 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3787 const ConstantRange &CR =
3788 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3789 Check(CR.contains(CI->getValue()),
3790 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3791 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3792 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3793 Call);
3794 }
3795 }
3796 }
3797
3798 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3799 Value *ArgVal = Call.getArgOperand(i);
3800 bool hasOB =
3802 bool isMustTail = Call.isMustTailCall();
3803 Check(hasOB != isMustTail,
3804 "preallocated operand either requires a preallocated bundle or "
3805 "the call to be musttail (but not both)",
3806 ArgVal, Call);
3807 }
3808 }
3809
3810 if (FTy->isVarArg()) {
3811 // FIXME? is 'nest' even legal here?
3812 bool SawNest = false;
3813 bool SawReturned = false;
3814
3815 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3816 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3817 SawNest = true;
3818 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3819 SawReturned = true;
3820 }
3821
3822 // Check attributes on the varargs part.
3823 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3824 Type *Ty = Call.getArgOperand(Idx)->getType();
3825 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3826 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3827
3828 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3829 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3830 SawNest = true;
3831 }
3832
3833 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3834 Check(!SawReturned, "More than one parameter has attribute returned!",
3835 Call);
3836 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3837 "Incompatible argument and return types for 'returned' "
3838 "attribute",
3839 Call);
3840 SawReturned = true;
3841 }
3842
3843 // Statepoint intrinsic is vararg but the wrapped function may be not.
3844 // Allow sret here and check the wrapped function in verifyStatepoint.
3845 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3846 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3847 "Attribute 'sret' cannot be used for vararg call arguments!",
3848 Call);
3849
3850 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3851 Check(Idx == Call.arg_size() - 1,
3852 "inalloca isn't on the last argument!", Call);
3853 }
3854 }
3855
3856 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3857 if (!IsIntrinsic) {
3858 for (Type *ParamTy : FTy->params()) {
3859 Check(!ParamTy->isMetadataTy(),
3860 "Function has metadata parameter but isn't an intrinsic", Call);
3861 Check(!ParamTy->isTokenLikeTy(),
3862 "Function has token parameter but isn't an intrinsic", Call);
3863 }
3864 }
3865
3866 // Verify that indirect calls don't return tokens.
3867 if (!Call.getCalledFunction()) {
3868 Check(!FTy->getReturnType()->isTokenLikeTy(),
3869 "Return type cannot be token for indirect call!");
3870 Check(!FTy->getReturnType()->isX86_AMXTy(),
3871 "Return type cannot be x86_amx for indirect call!");
3872 }
3873
3875 visitIntrinsicCall(ID, Call);
3876
3877 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3878 // most one "gc-transition", at most one "cfguardtarget", at most one
3879 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3880 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3881 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3882 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3883 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3884 FoundAttachedCallBundle = false;
3885 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3886 OperandBundleUse BU = Call.getOperandBundleAt(i);
3887 uint32_t Tag = BU.getTagID();
3888 if (Tag == LLVMContext::OB_deopt) {
3889 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3890 FoundDeoptBundle = true;
3891 } else if (Tag == LLVMContext::OB_gc_transition) {
3892 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3893 Call);
3894 FoundGCTransitionBundle = true;
3895 } else if (Tag == LLVMContext::OB_funclet) {
3896 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3897 FoundFuncletBundle = true;
3898 Check(BU.Inputs.size() == 1,
3899 "Expected exactly one funclet bundle operand", Call);
3900 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3901 "Funclet bundle operands should correspond to a FuncletPadInst",
3902 Call);
3903 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3904 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3905 Call);
3906 FoundCFGuardTargetBundle = true;
3907 Check(BU.Inputs.size() == 1,
3908 "Expected exactly one cfguardtarget bundle operand", Call);
3909 } else if (Tag == LLVMContext::OB_ptrauth) {
3910 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3911 FoundPtrauthBundle = true;
3912 Check(BU.Inputs.size() == 2,
3913 "Expected exactly two ptrauth bundle operands", Call);
3914 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3915 BU.Inputs[0]->getType()->isIntegerTy(32),
3916 "Ptrauth bundle key operand must be an i32 constant", Call);
3917 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3918 "Ptrauth bundle discriminator operand must be an i64", Call);
3919 } else if (Tag == LLVMContext::OB_kcfi) {
3920 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3921 FoundKCFIBundle = true;
3922 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3923 Call);
3924 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3925 BU.Inputs[0]->getType()->isIntegerTy(32),
3926 "Kcfi bundle operand must be an i32 constant", Call);
3927 } else if (Tag == LLVMContext::OB_preallocated) {
3928 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3929 Call);
3930 FoundPreallocatedBundle = true;
3931 Check(BU.Inputs.size() == 1,
3932 "Expected exactly one preallocated bundle operand", Call);
3933 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3934 Check(Input &&
3935 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3936 "\"preallocated\" argument must be a token from "
3937 "llvm.call.preallocated.setup",
3938 Call);
3939 } else if (Tag == LLVMContext::OB_gc_live) {
3940 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3941 FoundGCLiveBundle = true;
3943 Check(!FoundAttachedCallBundle,
3944 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3945 FoundAttachedCallBundle = true;
3946 verifyAttachedCallBundle(Call, BU);
3947 }
3948 }
3949
3950 // Verify that callee and callsite agree on whether to use pointer auth.
3951 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3952 "Direct call cannot have a ptrauth bundle", Call);
3953
3954 // Verify that each inlinable callsite of a debug-info-bearing function in a
3955 // debug-info-bearing function has a debug location attached to it. Failure to
3956 // do so causes assertion failures when the inliner sets up inline scope info
3957 // (Interposable functions are not inlinable, neither are functions without
3958 // definitions.)
3964 "inlinable function call in a function with "
3965 "debug info must have a !dbg location",
3966 Call);
3967
3968 if (Call.isInlineAsm())
3969 verifyInlineAsmCall(Call);
3970
3971 ConvergenceVerifyHelper.visit(Call);
3972
3973 visitInstruction(Call);
3974}
3975
3976void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3977 StringRef Context) {
3978 Check(!Attrs.contains(Attribute::InAlloca),
3979 Twine("inalloca attribute not allowed in ") + Context);
3980 Check(!Attrs.contains(Attribute::InReg),
3981 Twine("inreg attribute not allowed in ") + Context);
3982 Check(!Attrs.contains(Attribute::SwiftError),
3983 Twine("swifterror attribute not allowed in ") + Context);
3984 Check(!Attrs.contains(Attribute::Preallocated),
3985 Twine("preallocated attribute not allowed in ") + Context);
3986 Check(!Attrs.contains(Attribute::ByRef),
3987 Twine("byref attribute not allowed in ") + Context);
3988}
3989
3990/// Two types are "congruent" if they are identical, or if they are both pointer
3991/// types with different pointee types and the same address space.
3992static bool isTypeCongruent(Type *L, Type *R) {
3993 if (L == R)
3994 return true;
3997 if (!PL || !PR)
3998 return false;
3999 return PL->getAddressSpace() == PR->getAddressSpace();
4000}
4001
4002static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4003 static const Attribute::AttrKind ABIAttrs[] = {
4004 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4005 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4006 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4007 Attribute::ByRef};
4008 AttrBuilder Copy(C);
4009 for (auto AK : ABIAttrs) {
4010 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4011 if (Attr.isValid())
4012 Copy.addAttribute(Attr);
4013 }
4014
4015 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4016 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4017 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4018 Attrs.hasParamAttr(I, Attribute::ByRef)))
4019 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4020 return Copy;
4021}
4022
4023void Verifier::verifyMustTailCall(CallInst &CI) {
4024 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4025
4026 Function *F = CI.getParent()->getParent();
4027 FunctionType *CallerTy = F->getFunctionType();
4028 FunctionType *CalleeTy = CI.getFunctionType();
4029 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4030 "cannot guarantee tail call due to mismatched varargs", &CI);
4031 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4032 "cannot guarantee tail call due to mismatched return types", &CI);
4033
4034 // - The calling conventions of the caller and callee must match.
4035 Check(F->getCallingConv() == CI.getCallingConv(),
4036 "cannot guarantee tail call due to mismatched calling conv", &CI);
4037
4038 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4039 // or a pointer bitcast followed by a ret instruction.
4040 // - The ret instruction must return the (possibly bitcasted) value
4041 // produced by the call or void.
4042 Value *RetVal = &CI;
4044
4045 // Handle the optional bitcast.
4046 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4047 Check(BI->getOperand(0) == RetVal,
4048 "bitcast following musttail call must use the call", BI);
4049 RetVal = BI;
4050 Next = BI->getNextNode();
4051 }
4052
4053 // Check the return.
4054 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4055 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4056 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4057 isa<UndefValue>(Ret->getReturnValue()),
4058 "musttail call result must be returned", Ret);
4059
4060 AttributeList CallerAttrs = F->getAttributes();
4061 AttributeList CalleeAttrs = CI.getAttributes();
4062 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4063 CI.getCallingConv() == CallingConv::Tail) {
4064 StringRef CCName =
4065 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4066
4067 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4068 // are allowed in swifttailcc call
4069 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4070 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4071 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4072 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4073 }
4074 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4075 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4076 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4077 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4078 }
4079 // - Varargs functions are not allowed
4080 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4081 " tail call for varargs function");
4082 return;
4083 }
4084
4085 // - The caller and callee prototypes must match. Pointer types of
4086 // parameters or return types may differ in pointee type, but not
4087 // address space.
4088 if (!CI.getIntrinsicID()) {
4089 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4090 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4091 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4092 Check(
4093 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4094 "cannot guarantee tail call due to mismatched parameter types", &CI);
4095 }
4096 }
4097
4098 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4099 // returned, preallocated, and inalloca, must match.
4100 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4101 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4102 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4103 Check(CallerABIAttrs == CalleeABIAttrs,
4104 "cannot guarantee tail call due to mismatched ABI impacting "
4105 "function attributes",
4106 &CI, CI.getOperand(I));
4107 }
4108}
4109
// Run the common call-site checks, then the additional constraints imposed
// by a 'musttail' marker, if present.
void Verifier::visitCallInst(CallInst &CI) {
  visitCallBase(CI);

  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4116
4117void Verifier::visitInvokeInst(InvokeInst &II) {
4118 visitCallBase(II);
4119
4120 // Verify that the first non-PHI instruction of the unwind destination is an
4121 // exception handling instruction.
4122 Check(
4123 II.getUnwindDest()->isEHPad(),
4124 "The unwind destination does not have an exception handling instruction!",
4125 &II);
4126
4127 visitTerminator(II);
4128}
4129
4130/// visitUnaryOperator - Check the argument to the unary operator.
4131///
4132void Verifier::visitUnaryOperator(UnaryOperator &U) {
4133 Check(U.getType() == U.getOperand(0)->getType(),
4134 "Unary operators must have same type for"
4135 "operands and result!",
4136 &U);
4137
4138 switch (U.getOpcode()) {
4139 // Check that floating-point arithmetic operators are only used with
4140 // floating-point operands.
4141 case Instruction::FNeg:
4142 Check(U.getType()->isFPOrFPVectorTy(),
4143 "FNeg operator only works with float types!", &U);
4144 break;
4145 default:
4146 llvm_unreachable("Unknown UnaryOperator opcode!");
4147 }
4148
4149 visitInstruction(U);
4150}
4151
4152/// visitBinaryOperator - Check that both arguments to the binary operator are
4153/// of the same type!
4154///
4155void Verifier::visitBinaryOperator(BinaryOperator &B) {
4156 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4157 "Both operands to a binary operator are not of the same type!", &B);
4158
4159 switch (B.getOpcode()) {
4160 // Check that integer arithmetic operators are only used with
4161 // integral operands.
4162 case Instruction::Add:
4163 case Instruction::Sub:
4164 case Instruction::Mul:
4165 case Instruction::SDiv:
4166 case Instruction::UDiv:
4167 case Instruction::SRem:
4168 case Instruction::URem:
4169 Check(B.getType()->isIntOrIntVectorTy(),
4170 "Integer arithmetic operators only work with integral types!", &B);
4171 Check(B.getType() == B.getOperand(0)->getType(),
4172 "Integer arithmetic operators must have same type "
4173 "for operands and result!",
4174 &B);
4175 break;
4176 // Check that floating-point arithmetic operators are only used with
4177 // floating-point operands.
4178 case Instruction::FAdd:
4179 case Instruction::FSub:
4180 case Instruction::FMul:
4181 case Instruction::FDiv:
4182 case Instruction::FRem:
4183 Check(B.getType()->isFPOrFPVectorTy(),
4184 "Floating-point arithmetic operators only work with "
4185 "floating-point types!",
4186 &B);
4187 Check(B.getType() == B.getOperand(0)->getType(),
4188 "Floating-point arithmetic operators must have same type "
4189 "for operands and result!",
4190 &B);
4191 break;
4192 // Check that logical operators are only used with integral operands.
4193 case Instruction::And:
4194 case Instruction::Or:
4195 case Instruction::Xor:
4196 Check(B.getType()->isIntOrIntVectorTy(),
4197 "Logical operators only work with integral types!", &B);
4198 Check(B.getType() == B.getOperand(0)->getType(),
4199 "Logical operators must have same type for operands and result!", &B);
4200 break;
4201 case Instruction::Shl:
4202 case Instruction::LShr:
4203 case Instruction::AShr:
4204 Check(B.getType()->isIntOrIntVectorTy(),
4205 "Shifts only work with integral types!", &B);
4206 Check(B.getType() == B.getOperand(0)->getType(),
4207 "Shift return type must be same as operands!", &B);
4208 break;
4209 default:
4210 llvm_unreachable("Unknown BinaryOperator opcode!");
4211 }
4212
4213 visitInstruction(B);
4214}
4215
4216void Verifier::visitICmpInst(ICmpInst &IC) {
4217 // Check that the operands are the same type
4218 Type *Op0Ty = IC.getOperand(0)->getType();
4219 Type *Op1Ty = IC.getOperand(1)->getType();
4220 Check(Op0Ty == Op1Ty,
4221 "Both operands to ICmp instruction are not of the same type!", &IC);
4222 // Check that the operands are the right type
4223 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4224 "Invalid operand types for ICmp instruction", &IC);
4225 // Check that the predicate is valid.
4226 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4227
4228 visitInstruction(IC);
4229}
4230
4231void Verifier::visitFCmpInst(FCmpInst &FC) {
4232 // Check that the operands are the same type
4233 Type *Op0Ty = FC.getOperand(0)->getType();
4234 Type *Op1Ty = FC.getOperand(1)->getType();
4235 Check(Op0Ty == Op1Ty,
4236 "Both operands to FCmp instruction are not of the same type!", &FC);
4237 // Check that the operands are the right type
4238 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4239 &FC);
4240 // Check that the predicate is valid.
4241 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4242
4243 visitInstruction(FC);
4244}
4245
4246void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4248 "Invalid extractelement operands!", &EI);
4249 visitInstruction(EI);
4250}
4251
4252void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4253 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4254 IE.getOperand(2)),
4255 "Invalid insertelement operands!", &IE);
4256 visitInstruction(IE);
4257}
4258
4259void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4261 SV.getShuffleMask()),
4262 "Invalid shufflevector operands!", &SV);
4263 visitInstruction(SV);
4264}
4265
4266void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4267 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4268
4269 Check(isa<PointerType>(TargetTy),
4270 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4271 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4272
4273 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4274 Check(!STy->isScalableTy(),
4275 "getelementptr cannot target structure that contains scalable vector"
4276 "type",
4277 &GEP);
4278 }
4279
4280 SmallVector<Value *, 16> Idxs(GEP.indices());
4281 Check(
4282 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4283 "GEP indexes must be integers", &GEP);
4284 Type *ElTy =
4285 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4286 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4287
4288 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4289
4290 Check(PtrTy && GEP.getResultElementType() == ElTy,
4291 "GEP is not of right type for indices!", &GEP, ElTy);
4292
4293 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4294 // Additional checks for vector GEPs.
4295 ElementCount GEPWidth = GEPVTy->getElementCount();
4296 if (GEP.getPointerOperandType()->isVectorTy())
4297 Check(
4298 GEPWidth ==
4299 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4300 "Vector GEP result width doesn't match operand's", &GEP);
4301 for (Value *Idx : Idxs) {
4302 Type *IndexTy = Idx->getType();
4303 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4304 ElementCount IndexWidth = IndexVTy->getElementCount();
4305 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4306 }
4307 Check(IndexTy->isIntOrIntVectorTy(),
4308 "All GEP indices should be of integer type");
4309 }
4310 }
4311
4312 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4313 "GEP address space doesn't match type", &GEP);
4314
4315 visitInstruction(GEP);
4316}
4317
4318static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4319 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4320}
4321
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata operands encode a list of [Low, High) pairs of integer
/// constants. Pairs must have matching types, be non-empty, appear in
/// ascending order, and be neither overlapping nor contiguous.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs; an odd count means a dangling bound.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // !noalias.addrspace bounds are always i32; for the other kinds the
    // bounds must match the value's scalar type.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // A full-set encoding is only accepted for !absolute_symbol.
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two pairs, also compare the first and the last pair —
  // presumably because ranges may wrap around, making the two ends adjacent.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4386
// Verify !range metadata attached to instruction I; delegates to the shared
// range-like checker with the Range kind.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4392
// Verify !noalias.addrspace metadata attached to instruction I; delegates to
// the shared range-like checker with the NoaliasAddrspace kind (i32 bounds).
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4400
4401void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4402 unsigned Size = DL.getTypeSizeInBits(Ty);
4403 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4404 Check(!(Size & (Size - 1)),
4405 "atomic memory access' operand must have a power-of-two size", Ty, I);
4406}
4407
// Verify a load: pointer-typed address operand, supportable alignment, a
// sized result type, and — for atomic loads — no release-flavored ordering,
// an int/pointer/FP element type, and a byte-sized power-of-two width.
void Verifier::visitLoadInst(LoadInst &LI) {
  Check(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = LI.getType();
  if (MaybeAlign A = LI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  }
  Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
  if (LI.isAtomic()) {
    // Loads are acquire-side operations; release orderings make no sense.
    Check(LI.getOrdering() != AtomicOrdering::Release &&
              LI.getOrdering() != AtomicOrdering::AcquireRelease,
          "Load cannot have Release ordering", &LI);
    Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
          "atomic load operand must have integer, pointer, or floating point "
          "type!",
          ElTy, &LI);
    checkAtomicMemAccessSize(ElTy, &LI);
  } else {
          "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
4433
4434void Verifier::visitStoreInst(StoreInst &SI) {
4435 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4436 Check(PTy, "Store operand must be a pointer.", &SI);
4437 Type *ElTy = SI.getOperand(0)->getType();
4438 if (MaybeAlign A = SI.getAlign()) {
4439 Check(A->value() <= Value::MaximumAlignment,
4440 "huge alignment values are unsupported", &SI);
4441 }
4442 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4443 if (SI.isAtomic()) {
4444 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4445 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4446 "Store cannot have Acquire ordering", &SI);
4447 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4448 "atomic store operand must have integer, pointer, or floating point "
4449 "type!",
4450 ElTy, &SI);
4451 checkAtomicMemAccessSize(ElTy, &SI);
4452 } else {
4453 Check(SI.getSyncScopeID() == SyncScope::System,
4454 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4455 }
4456 visitInstruction(SI);
4457}
4458
4459/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4460void Verifier::verifySwiftErrorCall(CallBase &Call,
4461 const Value *SwiftErrorVal) {
4462 for (const auto &I : llvm::enumerate(Call.args())) {
4463 if (I.value() == SwiftErrorVal) {
4464 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4465 "swifterror value when used in a callsite should be marked "
4466 "with swifterror attribute",
4467 SwiftErrorVal, Call);
4468 }
4469 }
4470}
4471
// Verify every user of a swifterror value: only loads, stores (as the
// pointer operand), and calls (as a swifterror-attributed argument) may use
// it.
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
  // Check that swifterror value is only used by loads, stores, or as
  // a swifterror argument.
  for (const User *U : SwiftErrorVal->users()) {
              isa<InvokeInst>(U),
          "swifterror value can only be loaded and stored from, or "
          "as a swifterror argument!",
          SwiftErrorVal, U);
    // If it is used by a store, check it is the second operand.
    if (auto StoreI = dyn_cast<StoreInst>(U))
      Check(StoreI->getOperand(1) == SwiftErrorVal,
            "swifterror value should be the second operand when used "
            "by stores",
            SwiftErrorVal, U);
    // Call sites must mark the matching parameter with the swifterror
    // attribute; delegated to verifySwiftErrorCall.
    if (auto *Call = dyn_cast<CallBase>(U))
      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
  }
}
4491
// Verify an alloca: sized allocated type, integer array-size operand,
// supportable alignment, swifterror-specific constraints, and the AMDGPU
// address-space restriction.
void Verifier::visitAllocaInst(AllocaInst &AI) {
  Type *Ty = AI.getAllocatedType();
  // Visited guards isSized() against recursive (self-referential) types.
  SmallPtrSet<Type*, 4> Visited;
  Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
  // Check if it's a target extension type that disallows being used on the
  // stack.
        "Alloca has illegal target extension type", &AI);
        "Alloca array size must have integer type", &AI);
  if (MaybeAlign A = AI.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &AI);
  }

  if (AI.isSwiftError()) {
    // swifterror slots must hold a single pointer.
    Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
          "swifterror alloca must not be array allocation", &AI);
    verifySwiftErrorValue(&AI);
  }

  if (TT.isAMDGPU()) {
        "alloca on amdgpu must be in addrspace(5)", &AI);
  }

  visitInstruction(AI);
}
4521
4522void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4523 Type *ElTy = CXI.getOperand(1)->getType();
4524 Check(ElTy->isIntOrPtrTy(),
4525 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4526 checkAtomicMemAccessSize(ElTy, &CXI);
4527 visitInstruction(CXI);
4528}
4529
// Verify atomicrmw: never unordered, and the operand type must match the
// operation family (Xchg: int/FP/pointer; FP ops: FP or FP vector; the rest:
// integer), with a byte-sized power-of-two width.
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
        "atomicrmw instructions cannot be unordered.", &RMWI);
  auto Op = RMWI.getOperation();
  Type *ElTy = RMWI.getOperand(1)->getType();
  if (Op == AtomicRMWInst::Xchg) {
    // NOTE(review): the diagnostic text omits "pointer" even though the
    // predicate above accepts pointer-typed operands.
    Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
              ElTy->isPointerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer or floating point type!",
          &RMWI, ElTy);
  } else if (AtomicRMWInst::isFPOperation(Op)) {
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have floating-point or fixed vector of floating-point "
              "type!",
          &RMWI, ElTy);
  } else {
    Check(ElTy->isIntegerTy(),
          "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
              " operand must have integer type!",
          &RMWI, ElTy);
  }
  checkAtomicMemAccessSize(ElTy, &RMWI);
        "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
4558
4559void Verifier::visitFenceInst(FenceInst &FI) {
4560 const AtomicOrdering Ordering = FI.getOrdering();
4561 Check(Ordering == AtomicOrdering::Acquire ||
4562 Ordering == AtomicOrdering::Release ||
4563 Ordering == AtomicOrdering::AcquireRelease ||
4564 Ordering == AtomicOrdering::SequentiallyConsistent,
4565 "fence instructions may only have acquire, release, acq_rel, or "
4566 "seq_cst ordering.",
4567 &FI);
4568 visitInstruction(FI);
4569}
4570
// Verify extractvalue: the type reached by the index list must equal the
// instruction's result type.
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
                                        EVI.getIndices()) == EVI.getType(),
        "Invalid ExtractValueInst operands!", &EVI);

  visitInstruction(EVI);
}
4578
// Verify insertvalue: the type reached by the index list must equal the type
// of the value being inserted (operand 1).
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
                                        IVI.getIndices()) ==
            IVI.getOperand(1)->getType(),
        "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
4587
4588static Value *getParentPad(Value *EHPad) {
4589 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4590 return FPI->getParentPad();
4591
4592 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4593}
4594
4595void Verifier::visitEHPadPredecessors(Instruction &I) {
4596 assert(I.isEHPad());
4597
4598 BasicBlock *BB = I.getParent();
4599 Function *F = BB->getParent();
4600
4601 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4602
4603 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4604 // The landingpad instruction defines its parent as a landing pad block. The
4605 // landing pad block may be branched to only by the unwind edge of an
4606 // invoke.
4607 for (BasicBlock *PredBB : predecessors(BB)) {
4608 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4609 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4610 "Block containing LandingPadInst must be jumped to "
4611 "only by the unwind edge of an invoke.",
4612 LPI);
4613 }
4614 return;
4615 }
4616 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4617 if (!pred_empty(BB))
4618 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4619 "Block containg CatchPadInst must be jumped to "
4620 "only by its catchswitch.",
4621 CPI);
4622 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4623 "Catchswitch cannot unwind to one of its catchpads",
4624 CPI->getCatchSwitch(), CPI);
4625 return;
4626 }
4627
4628 // Verify that each pred has a legal terminator with a legal to/from EH
4629 // pad relationship.
4630 Instruction *ToPad = &I;
4631 Value *ToPadParent = getParentPad(ToPad);
4632 for (BasicBlock *PredBB : predecessors(BB)) {
4633 Instruction *TI = PredBB->getTerminator();
4634 Value *FromPad;
4635 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4636 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4637 "EH pad must be jumped to via an unwind edge", ToPad, II);
4638 auto *CalledFn =
4639 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4640 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4641 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4642 continue;
4643 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4644 FromPad = Bundle->Inputs[0];
4645 else
4646 FromPad = ConstantTokenNone::get(II->getContext());
4647 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4648 FromPad = CRI->getOperand(0);
4649 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4650 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4651 FromPad = CSI;
4652 } else {
4653 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4654 }
4655
4656 // The edge may exit from zero or more nested pads.
4657 SmallPtrSet<Value *, 8> Seen;
4658 for (;; FromPad = getParentPad(FromPad)) {
4659 Check(FromPad != ToPad,
4660 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4661 if (FromPad == ToPadParent) {
4662 // This is a legal unwind edge.
4663 break;
4664 }
4665 Check(!isa<ConstantTokenNone>(FromPad),
4666 "A single unwind edge may only enter one EH pad", TI);
4667 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4668 FromPad);
4669
4670 // This will be diagnosed on the corresponding instruction already. We
4671 // need the extra check here to make sure getParentPad() works.
4672 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4673 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4674 }
4675 }
4676}
4677
// Verify a landingpad: it needs at least one clause or the cleanup flag, a
// function-wide consistent result type, an enclosing personality function,
// first-non-PHI placement, and well-typed catch/filter clauses.
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
        "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads (and resumes) in one function must agree on the result
  // type; the first one seen fixes it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Check(LandingPadResultTy == LPI.getType(),
          "The landingpad instruction should have a consistent result type "
          "inside a function.",
          &LPI);

  Function *F = LPI.getParent()->getParent();
  Check(F->hasPersonalityFn(),
        "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Check(LPI.getParent()->getLandingPadInst() == &LPI,
        "LandingPadInst not the first non-PHI instruction in the block.", &LPI);

  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
    Constant *Clause = LPI.getClause(i);
    if (LPI.isCatch(i)) {
      // Catch clauses name a typeinfo, which is pointer-typed.
      Check(isa<PointerType>(Clause->getType()),
            "Catch operand does not have pointer type!", &LPI);
    } else {
      Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
            "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
4717
// Verify resume: requires a personality function, and its operand type must
// agree with the function-wide landingpad result type.
void Verifier::visitResumeInst(ResumeInst &RI) {
        "ResumeInst needs to be in a function with a personality.", &RI);

  // Resume shares the function-wide result type fixed by the first
  // landingpad/resume encountered.
  if (!LandingPadResultTy)
    LandingPadResultTy = RI.getValue()->getType();
  else
    Check(LandingPadResultTy == RI.getValue()->getType(),
          "The resume instruction should have a consistent result type "
          "inside a function.",
          &RI);

  visitTerminator(RI);
}
4732
// Verify a catchpad: requires a personality, must be nested directly inside
// a catchswitch, and must be the first non-PHI instruction in its block.
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *BB = CPI.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchPadInst needs to be in a function with a personality.", &CPI);

        "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
        CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CPI,
        "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
4752
4753void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4754 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4755 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4756 CatchReturn.getOperand(0));
4757
4758 visitTerminator(CatchReturn);
4759}
4760
4761void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4762 BasicBlock *BB = CPI.getParent();
4763
4764 Function *F = BB->getParent();
4765 Check(F->hasPersonalityFn(),
4766 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4767
4768 // The cleanuppad instruction must be the first non-PHI instruction in the
4769 // block.
4770 Check(&*BB->getFirstNonPHIIt() == &CPI,
4771 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4772
4773 auto *ParentPad = CPI.getParentPad();
4774 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4775 "CleanupPadInst has an invalid parent.", &CPI);
4776
4777 visitEHPadPredecessors(CPI);
4778 visitFuncletPadInst(CPI);
4779}
4780
/// Verify unwind-destination consistency for a funclet pad (catchpad or
/// cleanuppad): every unwind edge that exits FPI must agree on one unwind
/// destination, found by a worklist search through nested cleanup pads; for
/// a catchpad that destination must also match the parent catchswitch's.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge seen that exits FPI; later exiting edges must match it.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's exiting unwind edges must also agree with its parent
  // catchswitch's unwind destination (or 'unwind to caller' when absent).
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4940
// Verify a catchswitch: requires a personality, leads its block, has a valid
// parent pad token, a non-empty handler list of catchpads, and (if present)
// an unwind destination that is a non-landingpad EH pad.
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
  BasicBlock *BB = CatchSwitch.getParent();

  Function *F = BB->getParent();
  Check(F->hasPersonalityFn(),
        "CatchSwitchInst needs to be in a function with a personality.",
        &CatchSwitch);

  // The catchswitch instruction must be the first non-PHI instruction in the
  // block.
  Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
        "CatchSwitchInst not the first non-PHI instruction in the block.",
        &CatchSwitch);

  auto *ParentPad = CatchSwitch.getParentPad();
  Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
        "CatchSwitchInst has an invalid parent.", ParentPad);

  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CatchSwitchInst must unwind to an EH block which is not a "
          "landingpad.",
          &CatchSwitch);

    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
    if (getParentPad(&*I) == ParentPad)
      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
  }

  Check(CatchSwitch.getNumHandlers() != 0,
        "CatchSwitchInst cannot have empty handler list", &CatchSwitch);

  for (BasicBlock *Handler : CatchSwitch.handlers()) {
    Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
          "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
  }

  visitEHPadPredecessors(CatchSwitch);
  visitTerminator(CatchSwitch);
}
4982
// Verify cleanupret: its pad operand must be a cleanuppad, and any unwind
// destination must start with a non-landingpad EH pad.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
4998
// Verify that the instruction defining operand i of I dominates this
// particular use, per SSA dominance rules.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  // Fall back to a full dominator-tree query for cross-block uses.
  const Use &U = I.getOperandUse(i);
  Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
}
5022
// Verify !dereferenceable / !dereferenceable_or_null metadata: only on
// pointer-producing loads/inttoptr, with exactly one i64 operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The single operand is the byte count and must be an i64 constant.
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5042
5043void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5044 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5045 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5046 &I);
5047 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5048}
5049
// Verify !prof metadata: first operand names the profile kind, then either
// branch weights (count must match the instruction's successor/target count)
// or value-profile ("VP") records are validated.
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // Number of branch-weight operands each branching instruction kind is
  // expected to carry; 0 means branch_weights are not allowed on it.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (BranchInst *BI = dyn_cast<BranchInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    if (isa<InvokeInst>(&I)) {
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    // Every weight operand must be a constant integer.
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
            // NOTE(review): "brunch_weights" typo is in the emitted
            // diagnostic text itself.
            "!prof brunch_weights operand is not a const int");
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5133
// Verify !DIAssignID metadata: attached only to allocas/memory-writing
// instructions, and referenced only by llvm.dbg.assign intrinsics or
// dbg-assign DbgVariableRecords in the same function.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // The record-based (non-intrinsic) debug-info form is checked the same
  // way: only dbg-assign records, in the same function as I.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5165
/// Verify an !mmra (memory model relaxation annotation) attachment: it must
/// be either a single MMRA tag, or a tuple whose operands are all MMRA tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag: it must then be a tuple of tags.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5183
/// Verify the shape of a memprof call stack node: a non-empty list whose
/// operands are constant-integer location hashes. Shared by !memprof (via
/// each MemInfoBlock's first operand) and !callsite verification.
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  for (const auto &Op : MD->operands())
          "call stack metadata operand should be constant integer", Op);
}
5194
5195void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5196 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5197 Check(MD->getNumOperands() >= 1,
5198 "!memprof annotations should have at least 1 metadata operand "
5199 "(MemInfoBlock)",
5200 MD);
5201
5202 // Check each MIB
5203 for (auto &MIBOp : MD->operands()) {
5204 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5205 // The first operand of an MIB should be the call stack metadata.
5206 // There rest of the operands should be MDString tags, and there should be
5207 // at least one.
5208 Check(MIB->getNumOperands() >= 2,
5209 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5210
5211 // Check call stack metadata (first operand).
5212 Check(MIB->getOperand(0) != nullptr,
5213 "!memprof MemInfoBlock first operand should not be null", MIB);
5214 Check(isa<MDNode>(MIB->getOperand(0)),
5215 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5216 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5217 visitCallStackMetadata(StackMD);
5218
5219 // The next set of 1 or more operands should be MDString.
5220 unsigned I = 1;
5221 for (; I < MIB->getNumOperands(); ++I) {
5222 if (!isa<MDString>(MIB->getOperand(I))) {
5223 Check(I > 1,
5224 "!memprof MemInfoBlock second operand should be an MDString",
5225 MIB);
5226 break;
5227 }
5228 }
5229
5230 // Any remaining should be MDNode that are pairs of integers
5231 for (; I < MIB->getNumOperands(); ++I) {
5232 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5233 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5234 MIB);
5235 Check(OpNode->getNumOperands() == 2,
5236 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5237 "operands",
5238 MIB);
5239 // Check that all of Op's operands are ConstantInt.
5240 Check(llvm::all_of(OpNode->operands(),
5241 [](const MDOperand &Op) {
5242 return mdconst::hasa<ConstantInt>(Op);
5243 }),
5244 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5245 "ConstantInt operands",
5246 MIB);
5247 }
5248 }
5249}
5250
5251void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5252 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5253 // Verify the partial callstack annotated from memprof profiles. This callsite
5254 // is a part of a profiled allocation callstack.
5255 visitCallStackMetadata(MD);
5256}
5257
5258static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5259 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5260 return isa<ConstantInt>(VAL->getValue());
5261 return false;
5262}
5263
/// Verify a !callee_type attachment: a list of generalized type metadata
/// nodes, each of the shape !{i64 0, !"<generalized type id>"}, describing
/// the possible callee signatures of an (indirect) call.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // First operand must be the constant i64 0 (the function-type offset).
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    // Second operand must be a generalized type-id string.
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5284
5285void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5286 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5287 Check(Annotation->getNumOperands() >= 1,
5288 "annotation must have at least one operand");
5289 for (const MDOperand &Op : Annotation->operands()) {
5290 bool TupleOfStrings =
5291 isa<MDTuple>(Op.get()) &&
5292 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5293 return isa<MDString>(Annotation.get());
5294 });
5295 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5296 "operands must be a string or a tuple of strings");
5297 }
5298}
5299
/// Verify a single alias scope MDNode (an element of an !alias.scope or
/// !noalias list): !{self-or-string, domain[, string]} where the domain is
/// itself !{self-or-string[, string]}.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  // The first operand names the scope: either a self-reference (which makes
  // the node distinct) or a string.
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
        "third scope operand must be string (if used)", MD);

  // The second operand is the scope's domain node.
  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  // Domains follow the same self-reference-or-string naming convention.
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5323
5324void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5325 for (const MDOperand &Op : MD->operands()) {
5326 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5327 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5328 visitAliasScopeMetadata(OpMD);
5329 }
5330}
5331
5332void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5333 auto IsValidAccessScope = [](const MDNode *MD) {
5334 return MD->getNumOperands() == 0 && MD->isDistinct();
5335 };
5336
5337 // It must be either an access scope itself...
5338 if (IsValidAccessScope(MD))
5339 return;
5340
5341 // ...or a list of access scopes.
5342 for (const MDOperand &Op : MD->operands()) {
5343 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5344 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5345 Check(IsValidAccessScope(OpMD),
5346 "Access scope list contains invalid access scope", MD);
5347 }
5348}
5349
/// verifyInstruction - Verify that an instruction is well formed.
///
/// Checks, in order: structural SSA properties (parenting, self-reference,
/// result type, use validity), per-operand rules (intrinsic address-taking,
/// cross-function/cross-module references, dominance, constant exprs), and
/// then every known metadata attachment kind via the per-kind visitors.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated in unreachable code, where SSA dominance
      // does not apply.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      // NOTE(review): the bundle-type argument below appears truncated in
      // this view - confirm against the upstream file.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a small allowlist of intrinsics may be the callee of a non-call
      // user (e.g. an invoke); all others must be direct calls.
      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the callee of this instruction.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  // From here on, dispatch each known metadata attachment kind to its
  // dedicated verifier.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      // The accuracy operand must be a finite, positive 'float' value.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);

  // !noalias and !alias.scope share the same list-of-scopes shape.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    // A location with an atom group requires Key Instructions to be enabled
    // on the enclosing subprogram.
    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Finally, verify every attachment as a generic MDNode. Debug locations
  // are only permitted inside !dbg and !loop attachments.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  InstsInThisBlock.insert(&I);
}
5604
5605/// Allow intrinsics to be verified in different ways.
5606void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5608 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5609 IF);
5610
5611 // Verify that the intrinsic prototype lines up with what the .td files
5612 // describe.
5613 FunctionType *IFTy = IF->getFunctionType();
5614 bool IsVarArg = IFTy->isVarArg();
5615
5619
5620 // Walk the descriptors to extract overloaded types.
5625 "Intrinsic has incorrect return type!", IF);
5627 "Intrinsic has incorrect argument type!", IF);
5628
5629 // Verify if the intrinsic call matches the vararg property.
5630 if (IsVarArg)
5632 "Intrinsic was not defined with variable arguments!", IF);
5633 else
5635 "Callsite was not defined with variable arguments!", IF);
5636
5637 // All descriptors should be absorbed by now.
5638 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5639
5640 // Now that we have the intrinsic ID and the actual argument types (and we
5641 // know they are legal for the intrinsic!) get the intrinsic name through the
5642 // usual means. This allows us to verify the mangling of argument types into
5643 // the name.
5644 const std::string ExpectedName =
5645 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5646 Check(ExpectedName == IF->getName(),
5647 "Intrinsic name not mangled correctly for type arguments! "
5648 "Should be: " +
5649 ExpectedName,
5650 IF);
5651
5652 // If the intrinsic takes MDNode arguments, verify that they are either global
5653 // or are local to *this* function.
5654 for (Value *V : Call.args()) {
5655 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5656 visitMetadataAsValue(*MD, Call.getCaller());
5657 if (auto *Const = dyn_cast<Constant>(V))
5658 Check(!Const->getType()->isX86_AMXTy(),
5659 "const x86_amx is not allowed in argument!");
5660 }
5661
5662 switch (ID) {
5663 default:
5664 break;
5665 case Intrinsic::assume: {
5666 for (auto &Elem : Call.bundle_op_infos()) {
5667 unsigned ArgCount = Elem.End - Elem.Begin;
5668 // Separate storage assumptions are special insofar as they're the only
5669 // operand bundles allowed on assumes that aren't parameter attributes.
5670 if (Elem.Tag->getKey() == "separate_storage") {
5671 Check(ArgCount == 2,
5672 "separate_storage assumptions should have 2 arguments", Call);
5673 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5674 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5675 "arguments to separate_storage assumptions should be pointers",
5676 Call);
5677 continue;
5678 }
5679 Check(Elem.Tag->getKey() == "ignore" ||
5680 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5681 "tags must be valid attribute names", Call);
5682 Attribute::AttrKind Kind =
5683 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5684 if (Kind == Attribute::Alignment) {
5685 Check(ArgCount <= 3 && ArgCount >= 2,
5686 "alignment assumptions should have 2 or 3 arguments", Call);
5687 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5688 "first argument should be a pointer", Call);
5689 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5690 "second argument should be an integer", Call);
5691 if (ArgCount == 3)
5692 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5693 "third argument should be an integer if present", Call);
5694 continue;
5695 }
5696 if (Kind == Attribute::Dereferenceable) {
5697 Check(ArgCount == 2,
5698 "dereferenceable assumptions should have 2 arguments", Call);
5699 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5700 "first argument should be a pointer", Call);
5701 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5702 "second argument should be an integer", Call);
5703 continue;
5704 }
5705 Check(ArgCount <= 2, "too many arguments", Call);
5706 if (Kind == Attribute::None)
5707 break;
5708 if (Attribute::isIntAttrKind(Kind)) {
5709 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5710 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5711 "the second argument should be a constant integral value", Call);
5712 } else if (Attribute::canUseAsParamAttr(Kind)) {
5713 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5714 } else if (Attribute::canUseAsFnAttr(Kind)) {
5715 Check((ArgCount) == 0, "this attribute has no argument", Call);
5716 }
5717 }
5718 break;
5719 }
5720 case Intrinsic::ucmp:
5721 case Intrinsic::scmp: {
5722 Type *SrcTy = Call.getOperand(0)->getType();
5723 Type *DestTy = Call.getType();
5724
5725 Check(DestTy->getScalarSizeInBits() >= 2,
5726 "result type must be at least 2 bits wide", Call);
5727
5728 bool IsDestTypeVector = DestTy->isVectorTy();
5729 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5730 "ucmp/scmp argument and result types must both be either vector or "
5731 "scalar types",
5732 Call);
5733 if (IsDestTypeVector) {
5734 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5735 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5736 Check(SrcVecLen == DestVecLen,
5737 "return type and arguments must have the same number of "
5738 "elements",
5739 Call);
5740 }
5741 break;
5742 }
5743 case Intrinsic::coro_id: {
5744 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5745 if (isa<ConstantPointerNull>(InfoArg))
5746 break;
5747 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5748 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5749 "info argument of llvm.coro.id must refer to an initialized "
5750 "constant");
5751 Constant *Init = GV->getInitializer();
5753 "info argument of llvm.coro.id must refer to either a struct or "
5754 "an array");
5755 break;
5756 }
5757 case Intrinsic::is_fpclass: {
5758 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5759 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5760 "unsupported bits for llvm.is.fpclass test mask");
5761 break;
5762 }
5763 case Intrinsic::fptrunc_round: {
5764 // Check the rounding mode
5765 Metadata *MD = nullptr;
5767 if (MAV)
5768 MD = MAV->getMetadata();
5769
5770 Check(MD != nullptr, "missing rounding mode argument", Call);
5771
5772 Check(isa<MDString>(MD),
5773 ("invalid value for llvm.fptrunc.round metadata operand"
5774 " (the operand should be a string)"),
5775 MD);
5776
5777 std::optional<RoundingMode> RoundMode =
5778 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5779 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5780 "unsupported rounding mode argument", Call);
5781 break;
5782 }
5783#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5784#include "llvm/IR/VPIntrinsics.def"
5785#undef BEGIN_REGISTER_VP_INTRINSIC
5786 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5787 break;
5788#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5789 case Intrinsic::INTRINSIC:
5790#include "llvm/IR/ConstrainedOps.def"
5791#undef INSTRUCTION
5792 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5793 break;
5794 case Intrinsic::dbg_declare: // llvm.dbg.declare
5795 case Intrinsic::dbg_value: // llvm.dbg.value
5796 case Intrinsic::dbg_assign: // llvm.dbg.assign
5797 case Intrinsic::dbg_label: // llvm.dbg.label
5798 // We no longer interpret debug intrinsics (the old variable-location
5799 // design). They're meaningless as far as LLVM is concerned we could make
5800 // it an error for them to appear, but it's possible we'll have users
5801 // converting back to intrinsics for the forseeable future (such as DXIL),
5802 // so tolerate their existance.
5803 break;
5804 case Intrinsic::memcpy:
5805 case Intrinsic::memcpy_inline:
5806 case Intrinsic::memmove:
5807 case Intrinsic::memset:
5808 case Intrinsic::memset_inline:
5809 break;
5810 case Intrinsic::experimental_memset_pattern: {
5811 const auto Memset = cast<MemSetPatternInst>(&Call);
5812 Check(Memset->getValue()->getType()->isSized(),
5813 "unsized types cannot be used as memset patterns", Call);
5814 break;
5815 }
5816 case Intrinsic::memcpy_element_unordered_atomic:
5817 case Intrinsic::memmove_element_unordered_atomic:
5818 case Intrinsic::memset_element_unordered_atomic: {
5819 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5820
5821 ConstantInt *ElementSizeCI =
5822 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5823 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5824 Check(ElementSizeVal.isPowerOf2(),
5825 "element size of the element-wise atomic memory intrinsic "
5826 "must be a power of 2",
5827 Call);
5828
5829 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5830 return Alignment && ElementSizeVal.ule(Alignment->value());
5831 };
5832 Check(IsValidAlignment(AMI->getDestAlign()),
5833 "incorrect alignment of the destination argument", Call);
5834 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5835 Check(IsValidAlignment(AMT->getSourceAlign()),
5836 "incorrect alignment of the source argument", Call);
5837 }
5838 break;
5839 }
5840 case Intrinsic::call_preallocated_setup: {
5841 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5842 Check(NumArgs != nullptr,
5843 "llvm.call.preallocated.setup argument must be a constant");
5844 bool FoundCall = false;
5845 for (User *U : Call.users()) {
5846 auto *UseCall = dyn_cast<CallBase>(U);
5847 Check(UseCall != nullptr,
5848 "Uses of llvm.call.preallocated.setup must be calls");
5849 Intrinsic::ID IID = UseCall->getIntrinsicID();
5850 if (IID == Intrinsic::call_preallocated_arg) {
5851 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5852 Check(AllocArgIndex != nullptr,
5853 "llvm.call.preallocated.alloc arg index must be a constant");
5854 auto AllocArgIndexInt = AllocArgIndex->getValue();
5855 Check(AllocArgIndexInt.sge(0) &&
5856 AllocArgIndexInt.slt(NumArgs->getValue()),
5857 "llvm.call.preallocated.alloc arg index must be between 0 and "
5858 "corresponding "
5859 "llvm.call.preallocated.setup's argument count");
5860 } else if (IID == Intrinsic::call_preallocated_teardown) {
5861 // nothing to do
5862 } else {
5863 Check(!FoundCall, "Can have at most one call corresponding to a "
5864 "llvm.call.preallocated.setup");
5865 FoundCall = true;
5866 size_t NumPreallocatedArgs = 0;
5867 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5868 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5869 ++NumPreallocatedArgs;
5870 }
5871 }
5872 Check(NumPreallocatedArgs != 0,
5873 "cannot use preallocated intrinsics on a call without "
5874 "preallocated arguments");
5875 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5876 "llvm.call.preallocated.setup arg size must be equal to number "
5877 "of preallocated arguments "
5878 "at call site",
5879 Call, *UseCall);
5880 // getOperandBundle() cannot be called if more than one of the operand
5881 // bundle exists. There is already a check elsewhere for this, so skip
5882 // here if we see more than one.
5883 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5884 1) {
5885 return;
5886 }
5887 auto PreallocatedBundle =
5888 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5889 Check(PreallocatedBundle,
5890 "Use of llvm.call.preallocated.setup outside intrinsics "
5891 "must be in \"preallocated\" operand bundle");
5892 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5893 "preallocated bundle must have token from corresponding "
5894 "llvm.call.preallocated.setup");
5895 }
5896 }
5897 break;
5898 }
5899 case Intrinsic::call_preallocated_arg: {
5900 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5901 Check(Token &&
5902 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5903 "llvm.call.preallocated.arg token argument must be a "
5904 "llvm.call.preallocated.setup");
5905 Check(Call.hasFnAttr(Attribute::Preallocated),
5906 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5907 "call site attribute");
5908 break;
5909 }
5910 case Intrinsic::call_preallocated_teardown: {
5911 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5912 Check(Token &&
5913 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5914 "llvm.call.preallocated.teardown token argument must be a "
5915 "llvm.call.preallocated.setup");
5916 break;
5917 }
5918 case Intrinsic::gcroot:
5919 case Intrinsic::gcwrite:
5920 case Intrinsic::gcread:
5921 if (ID == Intrinsic::gcroot) {
5922 AllocaInst *AI =
5924 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5926 "llvm.gcroot parameter #2 must be a constant.", Call);
5927 if (!AI->getAllocatedType()->isPointerTy()) {
5929 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5930 "or argument #2 must be a non-null constant.",
5931 Call);
5932 }
5933 }
5934
5935 Check(Call.getParent()->getParent()->hasGC(),
5936 "Enclosing function does not use GC.", Call);
5937 break;
5938 case Intrinsic::init_trampoline:
5940 "llvm.init_trampoline parameter #2 must resolve to a function.",
5941 Call);
5942 break;
5943 case Intrinsic::prefetch:
5944 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5945 "rw argument to llvm.prefetch must be 0-1", Call);
5946 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5947 "locality argument to llvm.prefetch must be 0-3", Call);
5948 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5949 "cache type argument to llvm.prefetch must be 0-1", Call);
5950 break;
5951 case Intrinsic::stackprotector:
5953 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5954 break;
5955 case Intrinsic::localescape: {
5956 BasicBlock *BB = Call.getParent();
5957 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5958 Call);
5959 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5960 Call);
5961 for (Value *Arg : Call.args()) {
5962 if (isa<ConstantPointerNull>(Arg))
5963 continue; // Null values are allowed as placeholders.
5964 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5965 Check(AI && AI->isStaticAlloca(),
5966 "llvm.localescape only accepts static allocas", Call);
5967 }
5968 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5969 SawFrameEscape = true;
5970 break;
5971 }
5972 case Intrinsic::localrecover: {
5974 Function *Fn = dyn_cast<Function>(FnArg);
5975 Check(Fn && !Fn->isDeclaration(),
5976 "llvm.localrecover first "
5977 "argument must be function defined in this module",
5978 Call);
5979 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5980 auto &Entry = FrameEscapeInfo[Fn];
5981 Entry.second = unsigned(
5982 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5983 break;
5984 }
5985
5986 case Intrinsic::experimental_gc_statepoint:
5987 if (auto *CI = dyn_cast<CallInst>(&Call))
5988 Check(!CI->isInlineAsm(),
5989 "gc.statepoint support for inline assembly unimplemented", CI);
5990 Check(Call.getParent()->getParent()->hasGC(),
5991 "Enclosing function does not use GC.", Call);
5992
5993 verifyStatepoint(Call);
5994 break;
5995 case Intrinsic::experimental_gc_result: {
5996 Check(Call.getParent()->getParent()->hasGC(),
5997 "Enclosing function does not use GC.", Call);
5998
5999 auto *Statepoint = Call.getArgOperand(0);
6000 if (isa<UndefValue>(Statepoint))
6001 break;
6002
6003 // Are we tied to a statepoint properly?
6004 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6005 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6006 Intrinsic::experimental_gc_statepoint,
6007 "gc.result operand #1 must be from a statepoint", Call,
6008 Call.getArgOperand(0));
6009
6010 // Check that result type matches wrapped callee.
6011 auto *TargetFuncType =
6012 cast<FunctionType>(StatepointCall->getParamElementType(2));
6013 Check(Call.getType() == TargetFuncType->getReturnType(),
6014 "gc.result result type does not match wrapped callee", Call);
6015 break;
6016 }
6017 case Intrinsic::experimental_gc_relocate: {
6018 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6019
6021 "gc.relocate must return a pointer or a vector of pointers", Call);
6022
6023 // Check that this relocate is correctly tied to the statepoint
6024
6025 // This is case for relocate on the unwinding path of an invoke statepoint
6026 if (LandingPadInst *LandingPad =
6028
6029 const BasicBlock *InvokeBB =
6030 LandingPad->getParent()->getUniquePredecessor();
6031
6032 // Landingpad relocates should have only one predecessor with invoke
6033 // statepoint terminator
6034 Check(InvokeBB, "safepoints should have unique landingpads",
6035 LandingPad->getParent());
6036 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6037 InvokeBB);
6039 "gc relocate should be linked to a statepoint", InvokeBB);
6040 } else {
6041 // In all other cases relocate should be tied to the statepoint directly.
6042 // This covers relocates on a normal return path of invoke statepoint and
6043 // relocates of a call statepoint.
6044 auto *Token = Call.getArgOperand(0);
6046 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6047 }
6048
6049 // Verify rest of the relocate arguments.
6050 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6051
6052 // Both the base and derived must be piped through the safepoint.
6055 "gc.relocate operand #2 must be integer offset", Call);
6056
6057 Value *Derived = Call.getArgOperand(2);
6058 Check(isa<ConstantInt>(Derived),
6059 "gc.relocate operand #3 must be integer offset", Call);
6060
6061 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6062 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6063
6064 // Check the bounds
6065 if (isa<UndefValue>(StatepointCall))
6066 break;
6067 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6068 .getOperandBundle(LLVMContext::OB_gc_live)) {
6069 Check(BaseIndex < Opt->Inputs.size(),
6070 "gc.relocate: statepoint base index out of bounds", Call);
6071 Check(DerivedIndex < Opt->Inputs.size(),
6072 "gc.relocate: statepoint derived index out of bounds", Call);
6073 }
6074
6075 // Relocated value must be either a pointer type or vector-of-pointer type,
6076 // but gc_relocate does not need to return the same pointer type as the
6077 // relocated pointer. It can be casted to the correct type later if it's
6078 // desired. However, they must have the same address space and 'vectorness'
6079 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6080 auto *ResultType = Call.getType();
6081 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6082 auto *BaseType = Relocate.getBasePtr()->getType();
6083
6084 Check(BaseType->isPtrOrPtrVectorTy(),
6085 "gc.relocate: relocated value must be a pointer", Call);
6086 Check(DerivedType->isPtrOrPtrVectorTy(),
6087 "gc.relocate: relocated value must be a pointer", Call);
6088
6089 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6090 "gc.relocate: vector relocates to vector and pointer to pointer",
6091 Call);
6092 Check(
6093 ResultType->getPointerAddressSpace() ==
6094 DerivedType->getPointerAddressSpace(),
6095 "gc.relocate: relocating a pointer shouldn't change its address space",
6096 Call);
6097
6098 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6099 Check(GC, "gc.relocate: calling function must have GCStrategy",
6100 Call.getFunction());
6101 if (GC) {
6102 auto isGCPtr = [&GC](Type *PTy) {
6103 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6104 };
6105 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6106 Check(isGCPtr(BaseType),
6107 "gc.relocate: relocated value must be a gc pointer", Call);
6108 Check(isGCPtr(DerivedType),
6109 "gc.relocate: relocated value must be a gc pointer", Call);
6110 }
6111 break;
6112 }
6113 case Intrinsic::experimental_patchpoint: {
6114 if (Call.getCallingConv() == CallingConv::AnyReg) {
6116 "patchpoint: invalid return type used with anyregcc", Call);
6117 }
6118 break;
6119 }
6120 case Intrinsic::eh_exceptioncode:
6121 case Intrinsic::eh_exceptionpointer: {
6123 "eh.exceptionpointer argument must be a catchpad", Call);
6124 break;
6125 }
6126 case Intrinsic::get_active_lane_mask: {
6128 "get_active_lane_mask: must return a "
6129 "vector",
6130 Call);
6131 auto *ElemTy = Call.getType()->getScalarType();
6132 Check(ElemTy->isIntegerTy(1),
6133 "get_active_lane_mask: element type is not "
6134 "i1",
6135 Call);
6136 break;
6137 }
6138 case Intrinsic::experimental_get_vector_length: {
6139 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6140 Check(!VF->isNegative() && !VF->isZero(),
6141 "get_vector_length: VF must be positive", Call);
6142 break;
6143 }
6144 case Intrinsic::masked_load: {
6145 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6146 Call);
6147
6148 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
6150 Value *PassThru = Call.getArgOperand(3);
6151 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6152 Call);
6153 Check(Alignment->getValue().isPowerOf2(),
6154 "masked_load: alignment must be a power of 2", Call);
6155 Check(PassThru->getType() == Call.getType(),
6156 "masked_load: pass through and return type must match", Call);
6157 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6158 cast<VectorType>(Call.getType())->getElementCount(),
6159 "masked_load: vector mask must be same length as return", Call);
6160 break;
6161 }
6162 case Intrinsic::masked_store: {
6163 Value *Val = Call.getArgOperand(0);
6164 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
6166 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6167 Call);
6168 Check(Alignment->getValue().isPowerOf2(),
6169 "masked_store: alignment must be a power of 2", Call);
6170 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6171 cast<VectorType>(Val->getType())->getElementCount(),
6172 "masked_store: vector mask must be same length as value", Call);
6173 break;
6174 }
6175
6176 case Intrinsic::masked_gather: {
6177 const APInt &Alignment =
6179 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6180 "masked_gather: alignment must be 0 or a power of 2", Call);
6181 break;
6182 }
6183 case Intrinsic::masked_scatter: {
6184 const APInt &Alignment =
6185 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
6186 Check(Alignment.isZero() || Alignment.isPowerOf2(),
6187 "masked_scatter: alignment must be 0 or a power of 2", Call);
6188 break;
6189 }
6190
6191 case Intrinsic::experimental_guard: {
6192 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6194 "experimental_guard must have exactly one "
6195 "\"deopt\" operand bundle");
6196 break;
6197 }
6198
6199 case Intrinsic::experimental_deoptimize: {
6200 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6201 Call);
6203 "experimental_deoptimize must have exactly one "
6204 "\"deopt\" operand bundle");
6206 "experimental_deoptimize return type must match caller return type");
6207
6208 if (isa<CallInst>(Call)) {
6210 Check(RI,
6211 "calls to experimental_deoptimize must be followed by a return");
6212
6213 if (!Call.getType()->isVoidTy() && RI)
6214 Check(RI->getReturnValue() == &Call,
6215 "calls to experimental_deoptimize must be followed by a return "
6216 "of the value computed by experimental_deoptimize");
6217 }
6218
6219 break;
6220 }
6221 case Intrinsic::vastart: {
6223 "va_start called in a non-varargs function");
6224 break;
6225 }
6226 case Intrinsic::get_dynamic_area_offset: {
6227 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6228 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6229 IntTy->getBitWidth(),
6230 "get_dynamic_area_offset result type must be scalar integer matching "
6231 "alloca address space width",
6232 Call);
6233 break;
6234 }
6235 case Intrinsic::vector_reduce_and:
6236 case Intrinsic::vector_reduce_or:
6237 case Intrinsic::vector_reduce_xor:
6238 case Intrinsic::vector_reduce_add:
6239 case Intrinsic::vector_reduce_mul:
6240 case Intrinsic::vector_reduce_smax:
6241 case Intrinsic::vector_reduce_smin:
6242 case Intrinsic::vector_reduce_umax:
6243 case Intrinsic::vector_reduce_umin: {
6244 Type *ArgTy = Call.getArgOperand(0)->getType();
6245 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6246 "Intrinsic has incorrect argument type!");
6247 break;
6248 }
6249 case Intrinsic::vector_reduce_fmax:
6250 case Intrinsic::vector_reduce_fmin: {
6251 Type *ArgTy = Call.getArgOperand(0)->getType();
6252 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6253 "Intrinsic has incorrect argument type!");
6254 break;
6255 }
6256 case Intrinsic::vector_reduce_fadd:
6257 case Intrinsic::vector_reduce_fmul: {
6258 // Unlike the other reductions, the first argument is a start value. The
6259 // second argument is the vector to be reduced.
6260 Type *ArgTy = Call.getArgOperand(1)->getType();
6261 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6262 "Intrinsic has incorrect argument type!");
6263 break;
6264 }
6265 case Intrinsic::smul_fix:
6266 case Intrinsic::smul_fix_sat:
6267 case Intrinsic::umul_fix:
6268 case Intrinsic::umul_fix_sat:
6269 case Intrinsic::sdiv_fix:
6270 case Intrinsic::sdiv_fix_sat:
6271 case Intrinsic::udiv_fix:
6272 case Intrinsic::udiv_fix_sat: {
6273 Value *Op1 = Call.getArgOperand(0);
6274 Value *Op2 = Call.getArgOperand(1);
6276 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6277 "vector of ints");
6279 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6280 "vector of ints");
6281
6282 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6283 Check(Op3->getType()->isIntegerTy(),
6284 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6285 Check(Op3->getBitWidth() <= 32,
6286 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6287
6288 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6289 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6290 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6291 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6292 "the operands");
6293 } else {
6294 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6295 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6296 "to the width of the operands");
6297 }
6298 break;
6299 }
6300 case Intrinsic::lrint:
6301 case Intrinsic::llrint:
6302 case Intrinsic::lround:
6303 case Intrinsic::llround: {
6304 Type *ValTy = Call.getArgOperand(0)->getType();
6305 Type *ResultTy = Call.getType();
6306 auto *VTy = dyn_cast<VectorType>(ValTy);
6307 auto *RTy = dyn_cast<VectorType>(ResultTy);
6308 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6309 ExpectedName + ": argument must be floating-point or vector "
6310 "of floating-points, and result must be integer or "
6311 "vector of integers",
6312 &Call);
6313 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6314 ExpectedName + ": argument and result disagree on vector use", &Call);
6315 if (VTy) {
6316 Check(VTy->getElementCount() == RTy->getElementCount(),
6317 ExpectedName + ": argument must be same length as result", &Call);
6318 }
6319 break;
6320 }
6321 case Intrinsic::bswap: {
6322 Type *Ty = Call.getType();
6323 unsigned Size = Ty->getScalarSizeInBits();
6324 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6325 break;
6326 }
6327 case Intrinsic::invariant_start: {
6328 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6329 Check(InvariantSize &&
6330 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6331 "invariant_start parameter must be -1, 0 or a positive number",
6332 &Call);
6333 break;
6334 }
6335 case Intrinsic::matrix_multiply:
6336 case Intrinsic::matrix_transpose:
6337 case Intrinsic::matrix_column_major_load:
6338 case Intrinsic::matrix_column_major_store: {
6340 ConstantInt *Stride = nullptr;
6341 ConstantInt *NumRows;
6342 ConstantInt *NumColumns;
6343 VectorType *ResultTy;
6344 Type *Op0ElemTy = nullptr;
6345 Type *Op1ElemTy = nullptr;
6346 switch (ID) {
6347 case Intrinsic::matrix_multiply: {
6348 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6349 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6350 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6352 ->getNumElements() ==
6353 NumRows->getZExtValue() * N->getZExtValue(),
6354 "First argument of a matrix operation does not match specified "
6355 "shape!");
6357 ->getNumElements() ==
6358 N->getZExtValue() * NumColumns->getZExtValue(),
6359 "Second argument of a matrix operation does not match specified "
6360 "shape!");
6361
6362 ResultTy = cast<VectorType>(Call.getType());
6363 Op0ElemTy =
6364 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6365 Op1ElemTy =
6366 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6367 break;
6368 }
6369 case Intrinsic::matrix_transpose:
6370 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6371 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6372 ResultTy = cast<VectorType>(Call.getType());
6373 Op0ElemTy =
6374 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6375 break;
6376 case Intrinsic::matrix_column_major_load: {
6378 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6379 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6380 ResultTy = cast<VectorType>(Call.getType());
6381 break;
6382 }
6383 case Intrinsic::matrix_column_major_store: {
6385 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6386 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6387 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6388 Op0ElemTy =
6389 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6390 break;
6391 }
6392 default:
6393 llvm_unreachable("unexpected intrinsic");
6394 }
6395
6396 Check(ResultTy->getElementType()->isIntegerTy() ||
6397 ResultTy->getElementType()->isFloatingPointTy(),
6398 "Result type must be an integer or floating-point type!", IF);
6399
6400 if (Op0ElemTy)
6401 Check(ResultTy->getElementType() == Op0ElemTy,
6402 "Vector element type mismatch of the result and first operand "
6403 "vector!",
6404 IF);
6405
6406 if (Op1ElemTy)
6407 Check(ResultTy->getElementType() == Op1ElemTy,
6408 "Vector element type mismatch of the result and second operand "
6409 "vector!",
6410 IF);
6411
6413 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6414 "Result of a matrix operation does not fit in the returned vector!");
6415
6416 if (Stride)
6417 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6418 "Stride must be greater or equal than the number of rows!", IF);
6419
6420 break;
6421 }
6422 case Intrinsic::vector_splice: {
6424 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6425 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6426 if (Call.getParent() && Call.getParent()->getParent()) {
6427 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6428 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6429 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6430 }
6431 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6432 (Idx >= 0 && Idx < KnownMinNumElements),
6433 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6434 "known minimum number of elements in the vector. For scalable "
6435 "vectors the minimum number of elements is determined from "
6436 "vscale_range.",
6437 &Call);
6438 break;
6439 }
6440 case Intrinsic::stepvector: {
6442 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6443 VecTy->getScalarSizeInBits() >= 8,
6444 "stepvector only supported for vectors of integers "
6445 "with a bitwidth of at least 8.",
6446 &Call);
6447 break;
6448 }
6449 case Intrinsic::experimental_vector_match: {
6450 Value *Op1 = Call.getArgOperand(0);
6451 Value *Op2 = Call.getArgOperand(1);
6453
6454 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6455 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6456 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6457
6458 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6460 "Second operand must be a fixed length vector.", &Call);
6461 Check(Op1Ty->getElementType()->isIntegerTy(),
6462 "First operand must be a vector of integers.", &Call);
6463 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6464 "First two operands must have the same element type.", &Call);
6465 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6466 "First operand and mask must have the same number of elements.",
6467 &Call);
6468 Check(MaskTy->getElementType()->isIntegerTy(1),
6469 "Mask must be a vector of i1's.", &Call);
6470 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6471 &Call);
6472 break;
6473 }
6474 case Intrinsic::vector_insert: {
6475 Value *Vec = Call.getArgOperand(0);
6476 Value *SubVec = Call.getArgOperand(1);
6477 Value *Idx = Call.getArgOperand(2);
6478 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6479
6480 VectorType *VecTy = cast<VectorType>(Vec->getType());
6481 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6482
6483 ElementCount VecEC = VecTy->getElementCount();
6484 ElementCount SubVecEC = SubVecTy->getElementCount();
6485 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6486 "vector_insert parameters must have the same element "
6487 "type.",
6488 &Call);
6489 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6490 "vector_insert index must be a constant multiple of "
6491 "the subvector's known minimum vector length.");
6492
6493 // If this insertion is not the 'mixed' case where a fixed vector is
6494 // inserted into a scalable vector, ensure that the insertion of the
6495 // subvector does not overrun the parent vector.
6496 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6497 Check(IdxN < VecEC.getKnownMinValue() &&
6498 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6499 "subvector operand of vector_insert would overrun the "
6500 "vector being inserted into.");
6501 }
6502 break;
6503 }
6504 case Intrinsic::vector_extract: {
6505 Value *Vec = Call.getArgOperand(0);
6506 Value *Idx = Call.getArgOperand(1);
6507 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6508
6509 VectorType *ResultTy = cast<VectorType>(Call.getType());
6510 VectorType *VecTy = cast<VectorType>(Vec->getType());
6511
6512 ElementCount VecEC = VecTy->getElementCount();
6513 ElementCount ResultEC = ResultTy->getElementCount();
6514
6515 Check(ResultTy->getElementType() == VecTy->getElementType(),
6516 "vector_extract result must have the same element "
6517 "type as the input vector.",
6518 &Call);
6519 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6520 "vector_extract index must be a constant multiple of "
6521 "the result type's known minimum vector length.");
6522
6523 // If this extraction is not the 'mixed' case where a fixed vector is
6524 // extracted from a scalable vector, ensure that the extraction does not
6525 // overrun the parent vector.
6526 if (VecEC.isScalable() == ResultEC.isScalable()) {
6527 Check(IdxN < VecEC.getKnownMinValue() &&
6528 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6529 "vector_extract would overrun.");
6530 }
6531 break;
6532 }
6533 case Intrinsic::experimental_vector_partial_reduce_add: {
6536
6537 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6538 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6539
6540 Check((VecWidth % AccWidth) == 0,
6541 "Invalid vector widths for partial "
6542 "reduction. The width of the input vector "
6543 "must be a positive integer multiple of "
6544 "the width of the accumulator vector.");
6545 break;
6546 }
6547 case Intrinsic::experimental_noalias_scope_decl: {
6548 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6549 break;
6550 }
6551 case Intrinsic::preserve_array_access_index:
6552 case Intrinsic::preserve_struct_access_index:
6553 case Intrinsic::aarch64_ldaxr:
6554 case Intrinsic::aarch64_ldxr:
6555 case Intrinsic::arm_ldaex:
6556 case Intrinsic::arm_ldrex: {
6557 Type *ElemTy = Call.getParamElementType(0);
6558 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6559 &Call);
6560 break;
6561 }
6562 case Intrinsic::aarch64_stlxr:
6563 case Intrinsic::aarch64_stxr:
6564 case Intrinsic::arm_stlex:
6565 case Intrinsic::arm_strex: {
6566 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6567 Check(ElemTy,
6568 "Intrinsic requires elementtype attribute on second argument.",
6569 &Call);
6570 break;
6571 }
6572 case Intrinsic::aarch64_prefetch: {
6573 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6574 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6575 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6576 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6577 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6578 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6579 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6580 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6581 break;
6582 }
6583 case Intrinsic::callbr_landingpad: {
6584 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6585 Check(CBR, "intrinstic requires callbr operand", &Call);
6586 if (!CBR)
6587 break;
6588
6589 const BasicBlock *LandingPadBB = Call.getParent();
6590 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6591 if (!PredBB) {
6592 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6593 break;
6594 }
6595 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6596 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6597 &Call);
6598 break;
6599 }
6600 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6601 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6602 "block in indirect destination list",
6603 &Call);
6604 const Instruction &First = *LandingPadBB->begin();
6605 Check(&First == &Call, "No other instructions may proceed intrinsic",
6606 &Call);
6607 break;
6608 }
6609 case Intrinsic::amdgcn_cs_chain: {
6610 auto CallerCC = Call.getCaller()->getCallingConv();
6611 switch (CallerCC) {
6612 case CallingConv::AMDGPU_CS:
6613 case CallingConv::AMDGPU_CS_Chain:
6614 case CallingConv::AMDGPU_CS_ChainPreserve:
6615 break;
6616 default:
6617 CheckFailed("Intrinsic can only be used from functions with the "
6618 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6619 "calling conventions",
6620 &Call);
6621 break;
6622 }
6623
6624 Check(Call.paramHasAttr(2, Attribute::InReg),
6625 "SGPR arguments must have the `inreg` attribute", &Call);
6626 Check(!Call.paramHasAttr(3, Attribute::InReg),
6627 "VGPR arguments must not have the `inreg` attribute", &Call);
6628
6629 auto *Next = Call.getNextNode();
6630 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6631 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6632 Intrinsic::amdgcn_unreachable;
6633 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6634 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6635 break;
6636 }
6637 case Intrinsic::amdgcn_init_exec_from_input: {
6638 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6639 Check(Arg && Arg->hasInRegAttr(),
6640 "only inreg arguments to the parent function are valid as inputs to "
6641 "this intrinsic",
6642 &Call);
6643 break;
6644 }
6645 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6646 auto CallerCC = Call.getCaller()->getCallingConv();
6647 switch (CallerCC) {
6648 case CallingConv::AMDGPU_CS_Chain:
6649 case CallingConv::AMDGPU_CS_ChainPreserve:
6650 break;
6651 default:
6652 CheckFailed("Intrinsic can only be used from functions with the "
6653 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6654 "calling conventions",
6655 &Call);
6656 break;
6657 }
6658
6659 unsigned InactiveIdx = 1;
6660 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6661 "Value for inactive lanes must not have the `inreg` attribute",
6662 &Call);
6663 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6664 "Value for inactive lanes must be a function argument", &Call);
6665 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6666 "Value for inactive lanes must be a VGPR function argument", &Call);
6667 break;
6668 }
6669 case Intrinsic::amdgcn_call_whole_wave: {
6671 Check(F, "Indirect whole wave calls are not allowed", &Call);
6672
6673 CallingConv::ID CC = F->getCallingConv();
6674 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6675 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6676 &Call);
6677
6678 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6679
6680 Check(Call.arg_size() == F->arg_size(),
6681 "Call argument count must match callee argument count", &Call);
6682
6683 // The first argument of the call is the callee, and the first argument of
6684 // the callee is the active mask. The rest of the arguments must match.
6685 Check(F->arg_begin()->getType()->isIntegerTy(1),
6686 "Callee must have i1 as its first argument", &Call);
6687 for (auto [CallArg, FuncArg] :
6688 drop_begin(zip_equal(Call.args(), F->args()))) {
6689 Check(CallArg->getType() == FuncArg.getType(),
6690 "Argument types must match", &Call);
6691
6692 // Check that inreg attributes match between call site and function
6693 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6694 FuncArg.hasInRegAttr(),
6695 "Argument inreg attributes must match", &Call);
6696 }
6697 break;
6698 }
6699 case Intrinsic::amdgcn_s_prefetch_data: {
6700 Check(
6703 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6704 break;
6705 }
6706 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6707 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6708 Value *Src0 = Call.getArgOperand(0);
6709 Value *Src1 = Call.getArgOperand(1);
6710
6711 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6712 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6713 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6714 Call.getArgOperand(3));
6715 Check(BLGP <= 4, "invalid value for blgp format", Call,
6716 Call.getArgOperand(4));
6717
6718 // AMDGPU::MFMAScaleFormats values
6719 auto getFormatNumRegs = [](unsigned FormatVal) {
6720 switch (FormatVal) {
6721 case 0:
6722 case 1:
6723 return 8u;
6724 case 2:
6725 case 3:
6726 return 6u;
6727 case 4:
6728 return 4u;
6729 default:
6730 llvm_unreachable("invalid format value");
6731 }
6732 };
6733
6734 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6735 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6736 return false;
6737 unsigned NumElts = Ty->getNumElements();
6738 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6739 };
6740
6741 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6742 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6743 Check(isValidSrcASrcBVector(Src0Ty),
6744 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6745 Check(isValidSrcASrcBVector(Src1Ty),
6746 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6747
6748 // Permit excess registers for the format.
6749 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6750 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6751 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6752 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6753 break;
6754 }
6755 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6756 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6757 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6758 Value *Src0 = Call.getArgOperand(1);
6759 Value *Src1 = Call.getArgOperand(3);
6760
6761 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6762 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6763 Check(FmtA <= 4, "invalid value for matrix format", Call,
6764 Call.getArgOperand(0));
6765 Check(FmtB <= 4, "invalid value for matrix format", Call,
6766 Call.getArgOperand(2));
6767
6768 // AMDGPU::MatrixFMT values
6769 auto getFormatNumRegs = [](unsigned FormatVal) {
6770 switch (FormatVal) {
6771 case 0:
6772 case 1:
6773 return 16u;
6774 case 2:
6775 case 3:
6776 return 12u;
6777 case 4:
6778 return 8u;
6779 default:
6780 llvm_unreachable("invalid format value");
6781 }
6782 };
6783
6784 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6785 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6786 return false;
6787 unsigned NumElts = Ty->getNumElements();
6788 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6789 };
6790
6791 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6792 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6793 Check(isValidSrcASrcBVector(Src0Ty),
6794 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6795 Check(isValidSrcASrcBVector(Src1Ty),
6796 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6797
6798 // Permit excess registers for the format.
6799 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6800 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6801 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6802 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6803 break;
6804 }
6805 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6806 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6807 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6808 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6809 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6810 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6811 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6812 Value *PtrArg = Call.getArgOperand(0);
6813 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6815 "cooperative atomic intrinsics require a generic or global pointer",
6816 &Call, PtrArg);
6817
6818 // Last argument must be a MD string
6820 MDNode *MD = cast<MDNode>(Op->getMetadata());
6821 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6822 "cooperative atomic intrinsics require that the last argument is a "
6823 "metadata string",
6824 &Call, Op);
6825 break;
6826 }
6827 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6828 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6829 Value *V = Call.getArgOperand(0);
6830 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6831 Check(RegCount % 8 == 0,
6832 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6833 break;
6834 }
6835 case Intrinsic::experimental_convergence_entry:
6836 case Intrinsic::experimental_convergence_anchor:
6837 break;
6838 case Intrinsic::experimental_convergence_loop:
6839 break;
6840 case Intrinsic::ptrmask: {
6841 Type *Ty0 = Call.getArgOperand(0)->getType();
6842 Type *Ty1 = Call.getArgOperand(1)->getType();
6844 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6845 "of pointers",
6846 &Call);
6847 Check(
6848 Ty0->isVectorTy() == Ty1->isVectorTy(),
6849 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6850 &Call);
6851 if (Ty0->isVectorTy())
6852 Check(cast<VectorType>(Ty0)->getElementCount() ==
6853 cast<VectorType>(Ty1)->getElementCount(),
6854 "llvm.ptrmask intrinsic arguments must have the same number of "
6855 "elements",
6856 &Call);
6857 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6858 "llvm.ptrmask intrinsic second argument bitwidth must match "
6859 "pointer index type size of first argument",
6860 &Call);
6861 break;
6862 }
6863 case Intrinsic::thread_pointer: {
6865 DL.getDefaultGlobalsAddressSpace(),
6866 "llvm.thread.pointer intrinsic return type must be for the globals "
6867 "address space",
6868 &Call);
6869 break;
6870 }
6871 case Intrinsic::threadlocal_address: {
6872 const Value &Arg0 = *Call.getArgOperand(0);
6873 Check(isa<GlobalValue>(Arg0),
6874 "llvm.threadlocal.address first argument must be a GlobalValue");
6875 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6876 "llvm.threadlocal.address operand isThreadLocal() must be true");
6877 break;
6878 }
6879 case Intrinsic::lifetime_start:
6880 case Intrinsic::lifetime_end: {
6883 "llvm.lifetime.start/end can only be used on alloca or poison",
6884 &Call);
6885 break;
6886 }
6887 };
6888
6889 // Verify that there aren't any unmediated control transfers between funclets.
6891 Function *F = Call.getParent()->getParent();
6892 if (F->hasPersonalityFn() &&
6893 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6894 // Run EH funclet coloring on-demand and cache results for other intrinsic
6895 // calls in this function
6896 if (BlockEHFuncletColors.empty())
6897 BlockEHFuncletColors = colorEHFunclets(*F);
6898
6899 // Check for catch-/cleanup-pad in first funclet block
6900 bool InEHFunclet = false;
6901 BasicBlock *CallBB = Call.getParent();
6902 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6903 assert(CV.size() > 0 && "Uncolored block");
6904 for (BasicBlock *ColorFirstBB : CV)
6905 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6906 It != ColorFirstBB->end())
6908 InEHFunclet = true;
6909
6910 // Check for funclet operand bundle
6911 bool HasToken = false;
6912 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6914 HasToken = true;
6915
6916 // This would cause silent code truncation in WinEHPrepare
6917 if (InEHFunclet)
6918 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6919 }
6920 }
6921}
6922
6923/// Carefully grab the subprogram from a local scope.
6924///
6925/// This carefully grabs the subprogram from a local scope, avoiding the
6926/// built-in assertions that would typically fire.
  // A null scope has no subprogram to report.
  if (!LocalScope)
    return nullptr;

  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
    return SP;

  // Lexical blocks chain up to an enclosing scope; recurse on the *raw*
  // operand so no metadata assertion fires on malformed input.
  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
    return getSubprogram(LB->getRawScope());

  // Just return null; broken scope chains are checked elsewhere.
  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
  return nullptr;
}
6941
// Verify a #dbg_label record: its label operand and the agreement between the
// label's scope and the record's !dbg attachment.
void Verifier::visit(DbgLabelRecord &DLR) {
          "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  // BB/F may be null for an unparented record; they are only used for
  // diagnostics context below.
  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);

  // getSubprogram tolerates broken scope chains; bail if either side is
  // unresolvable (diagnosed elsewhere).
  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!LabelSP || !LocSP)
    return;

  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment",
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
6969
// Verify a #dbg_value/#dbg_declare/#dbg_assign record: its kind, location,
// variable, expression, assign-specific operands, and scope agreement with the
// !dbg attachment.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value", &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    visitValueAsMetadata(*VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
    visitDIArgList(*AL, F);
  }

          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);

          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);

  if (DVR.isDbgAssign()) {
            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
                AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
      visitValueAsMetadata(*VAM, F);

            "invalid #dbg_assign address expression", &DVR,
            DVR.getRawAddressExpression(), BB, F);
    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(&DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
          BB, F);

  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation",
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  // Finally, cross-check against other argument records for duplicates.
  verifyFnArgs(DVR);
}
7060
7061void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7062 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7063 auto *RetTy = cast<VectorType>(VPCast->getType());
7064 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7065 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7066 "VP cast intrinsic first argument and result vector lengths must be "
7067 "equal",
7068 *VPCast);
7069
7070 switch (VPCast->getIntrinsicID()) {
7071 default:
7072 llvm_unreachable("Unknown VP cast intrinsic");
7073 case Intrinsic::vp_trunc:
7074 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7075 "llvm.vp.trunc intrinsic first argument and result element type "
7076 "must be integer",
7077 *VPCast);
7078 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7079 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7080 "larger than the bit size of the return type",
7081 *VPCast);
7082 break;
7083 case Intrinsic::vp_zext:
7084 case Intrinsic::vp_sext:
7085 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7086 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7087 "element type must be integer",
7088 *VPCast);
7089 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7090 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7091 "argument must be smaller than the bit size of the return type",
7092 *VPCast);
7093 break;
7094 case Intrinsic::vp_fptoui:
7095 case Intrinsic::vp_fptosi:
7096 case Intrinsic::vp_lrint:
7097 case Intrinsic::vp_llrint:
7098 Check(
7099 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7100 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7101 "type must be floating-point and result element type must be integer",
7102 *VPCast);
7103 break;
7104 case Intrinsic::vp_uitofp:
7105 case Intrinsic::vp_sitofp:
7106 Check(
7107 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7108 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7109 "type must be integer and result element type must be floating-point",
7110 *VPCast);
7111 break;
7112 case Intrinsic::vp_fptrunc:
7113 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7114 "llvm.vp.fptrunc intrinsic first argument and result element type "
7115 "must be floating-point",
7116 *VPCast);
7117 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7118 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7119 "larger than the bit size of the return type",
7120 *VPCast);
7121 break;
7122 case Intrinsic::vp_fpext:
7123 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7124 "llvm.vp.fpext intrinsic first argument and result element type "
7125 "must be floating-point",
7126 *VPCast);
7127 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7128 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7129 "smaller than the bit size of the return type",
7130 *VPCast);
7131 break;
7132 case Intrinsic::vp_ptrtoint:
7133 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7134 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7135 "pointer and result element type must be integer",
7136 *VPCast);
7137 break;
7138 case Intrinsic::vp_inttoptr:
7139 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7140 "llvm.vp.inttoptr intrinsic first argument element type must be "
7141 "integer and result element type must be pointer",
7142 *VPCast);
7143 break;
7144 }
7145 }
7146
7147 switch (VPI.getIntrinsicID()) {
7148 case Intrinsic::vp_fcmp: {
7149 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7151 "invalid predicate for VP FP comparison intrinsic", &VPI);
7152 break;
7153 }
7154 case Intrinsic::vp_icmp: {
7155 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7157 "invalid predicate for VP integer comparison intrinsic", &VPI);
7158 break;
7159 }
7160 case Intrinsic::vp_is_fpclass: {
7161 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7162 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7163 "unsupported bits for llvm.vp.is.fpclass test mask");
7164 break;
7165 }
7166 case Intrinsic::experimental_vp_splice: {
7167 VectorType *VecTy = cast<VectorType>(VPI.getType());
7168 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7169 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7170 if (VPI.getParent() && VPI.getParent()->getParent()) {
7171 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7172 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7173 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7174 }
7175 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7176 (Idx >= 0 && Idx < KnownMinNumElements),
7177 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7178 "known minimum number of elements in the vector. For scalable "
7179 "vectors the minimum number of elements is determined from "
7180 "vscale_range.",
7181 &VPI);
7182 break;
7183 }
7184 }
7185}
7186
// Verify an llvm.experimental.constrained.* intrinsic: the operand count
// (including trailing metadata operands) and the per-intrinsic type
// relationships between the first argument and the result.
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
  unsigned NumOperands = FPI.getNonMetadataArgCount();
  bool HasRoundingMD =

  // Add the expected number of metadata operands.
  NumOperands += (1 + HasRoundingMD);

  // Compare intrinsics carry an extra predicate metadata operand.
    NumOperands += 1;
  Check((FPI.arg_size() == NumOperands),
        "invalid arguments for constrained FP intrinsic", &FPI);

  switch (FPI.getIntrinsicID()) {
  case Intrinsic::experimental_constrained_lrint:
  case Intrinsic::experimental_constrained_llrint: {
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_lround:
  case Intrinsic::experimental_constrained_llround: {
    Type *ValTy = FPI.getArgOperand(0)->getType();
    Type *ResultTy = FPI.getType();
    Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
          "Intrinsic does not support vectors", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps: {
    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
          "invalid predicate for constrained FP comparison intrinsic", &FPI);
    break;
  }

  case Intrinsic::experimental_constrained_fptosi:
  case Intrinsic::experimental_constrained_fptoui: {
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic first argument must be floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the intrinsic result to run the mirrored checks.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic result must be an integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_sitofp:
  case Intrinsic::experimental_constrained_uitofp: {
    Value *Operand = FPI.getArgOperand(0);
    ElementCount SrcEC;
    Check(Operand->getType()->isIntOrIntVectorTy(),
          "Intrinsic first argument must be integer", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      SrcEC = cast<VectorType>(OperandT)->getElementCount();
    }

    // Re-point Operand at the intrinsic result to run the mirrored checks.
    Operand = &FPI;
    Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    Check(Operand->getType()->isFPOrFPVectorTy(),
          "Intrinsic result must be a floating point", &FPI);
    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
      Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    break;
  }

  case Intrinsic::experimental_constrained_fptrunc:
  case Intrinsic::experimental_constrained_fpext: {
    Value *Operand = FPI.getArgOperand(0);
    Type *OperandTy = Operand->getType();
    Value *Result = &FPI;
    Type *ResultTy = Result->getType();
    Check(OperandTy->isFPOrFPVectorTy(),
          "Intrinsic first argument must be FP or FP vector", &FPI);
    Check(ResultTy->isFPOrFPVectorTy(),
          "Intrinsic result must be FP or FP vector", &FPI);
    Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
          "Intrinsic first argument and result disagree on vector use", &FPI);
    if (OperandTy->isVectorTy()) {
      Check(cast<VectorType>(OperandTy)->getElementCount() ==
                cast<VectorType>(ResultTy)->getElementCount(),
            "Intrinsic first argument and result vector lengths must be equal",
            &FPI);
    }
    // fptrunc must narrow and fpext must widen the scalar width.
    if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
      Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be larger than result type",
            &FPI);
    } else {
      Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
    }
    break;
  }

  default:
    break;
  }

  // If a non-metadata argument is passed in a metadata slot then the
  // error will be caught earlier when the incorrect argument doesn't
  // match the specification in the intrinsic call table. Thus, no
  // argument type check is needed here.

  Check(FPI.getExceptionBehavior().has_value(),
        "invalid exception behavior argument", &FPI);
  if (HasRoundingMD) {
    Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
          &FPI);
  }
}
7320
7321void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7322 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7323 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7324
7325 // We don't know whether this intrinsic verified correctly.
7326 if (!V || !E || !E->isValid())
7327 return;
7328
7329 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7330 auto Fragment = E->getFragmentInfo();
7331 if (!Fragment)
7332 return;
7333
7334 // The frontend helps out GDB by emitting the members of local anonymous
7335 // unions as artificial local variables with shared storage. When SROA splits
7336 // the storage for artificial local variables that are smaller than the entire
7337 // union, the overhang piece will be outside of the allotted space for the
7338 // variable and this check fails.
7339 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7340 if (V->isArtificial())
7341 return;
7342
7343 verifyFragmentExpression(*V, *Fragment, &DVR);
7344}
7345
// Shared implementation: check that a fragment (offset/size in bits) fits
// strictly inside the variable's storage. Desc is only used as diagnostic
// context.
template <typename ValueOrMetadata>
void Verifier::verifyFragmentExpression(const DIVariable &V,
                                        ValueOrMetadata *Desc) {
  // If there's no size, the type is broken, but that should be checked
  // elsewhere.
  auto VarSize = V.getSizeInBits();
  if (!VarSize)
    return;

  unsigned FragSize = Fragment.SizeInBits;
  unsigned FragOffset = Fragment.OffsetInBits;
  // The fragment must lie entirely within the variable's storage.
  // NOTE(review): FragSize + FragOffset is a 32-bit sum; presumably fragment
  // fields cannot approach UINT_MAX — confirm no overflow is possible here.
  CheckDI(FragSize + FragOffset <= *VarSize,
          "fragment is larger than or outside of variable", Desc, &V);
  // A fragment covering the whole variable should not be a fragment at all.
  CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
}
7362
7363void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7364 // This function does not take the scope of noninlined function arguments into
7365 // account. Don't run it if current function is nodebug, because it may
7366 // contain inlined debug intrinsics.
7367 if (!HasDebugInfo)
7368 return;
7369
7370 // For performance reasons only check non-inlined ones.
7371 if (DVR.getDebugLoc()->getInlinedAt())
7372 return;
7373
7374 DILocalVariable *Var = DVR.getVariable();
7375 CheckDI(Var, "#dbg record without variable");
7376
7377 unsigned ArgNo = Var->getArg();
7378 if (!ArgNo)
7379 return;
7380
7381 // Verify there are no duplicate function argument debug info entries.
7382 // These will cause hard-to-debug assertions in the DWARF backend.
7383 if (DebugFnArgs.size() < ArgNo)
7384 DebugFnArgs.resize(ArgNo, nullptr);
7385
7386 auto *Prev = DebugFnArgs[ArgNo - 1];
7387 DebugFnArgs[ArgNo - 1] = Var;
7388 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7389 Prev, Var);
7390}
7391
// Reject DW_OP_LLVM_entry_value expressions in IR; they are only meaningful
// in MIR, with a narrow exception for swiftasync arguments.
void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
  DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());

  // We don't know whether this intrinsic verified correctly.
  if (!E || !E->isValid())
    return;

    Value *VarValue = DVR.getVariableLocationOp(0);
    // Undef/poison locations carry no live value; nothing to enforce.
    if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
      return;
    // We allow EntryValues for swift async arguments, as they have an
    // ABI-guarantee to be turned into a specific register.
    if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
        ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
      return;
  }

  CheckDI(!E->isEntryValue(),
          "Entry values are only allowed in MIR unless they target a "
          "swiftasync Argument",
          &DVR);
}
7415
7416void Verifier::verifyCompileUnits() {
7417 // When more than one Module is imported into the same context, such as during
7418 // an LTO build before linking the modules, ODR type uniquing may cause types
7419 // to point to a different CU. This check does not make sense in this case.
7420 if (M.getContext().isODRUniquingDebugTypes())
7421 return;
7422 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7423 SmallPtrSet<const Metadata *, 2> Listed;
7424 if (CUs)
7425 Listed.insert_range(CUs->operands());
7426 for (const auto *CU : CUVisited)
7427 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7428 CUVisited.clear();
7429}
7430
7431void Verifier::verifyDeoptimizeCallingConvs() {
7432 if (DeoptimizeDeclarations.empty())
7433 return;
7434
7435 const Function *First = DeoptimizeDeclarations[0];
7436 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7437 Check(First->getCallingConv() == F->getCallingConv(),
7438 "All llvm.experimental.deoptimize declarations must have the same "
7439 "calling convention",
7440 First, F);
7441 }
7442}
7443
7444void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7445 const OperandBundleUse &BU) {
7446 FunctionType *FTy = Call.getFunctionType();
7447
7448 Check((FTy->getReturnType()->isPointerTy() ||
7449 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7450 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7451 "function returning a pointer or a non-returning function that has a "
7452 "void return type",
7453 Call);
7454
7455 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7456 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7457 "an argument",
7458 Call);
7459
7460 auto *Fn = cast<Function>(BU.Inputs.front());
7461 Intrinsic::ID IID = Fn->getIntrinsicID();
7462
7463 if (IID) {
7464 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7465 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7466 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7467 "invalid function argument", Call);
7468 } else {
7469 StringRef FnName = Fn->getName();
7470 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7471 FnName == "objc_claimAutoreleasedReturnValue" ||
7472 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7473 "invalid function argument", Call);
7474 }
7475}
7476
// Check llvm.experimental.noalias.scope.decl calls: each must carry a
// single-scope metadata list, and (optionally) no two declarations of the
// same scope may dominate one another.
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    const auto *ScopeListMV = cast<MetadataAsValue>(
    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(NoAliasScopeDecls, Compare);

  // Go over the intrinsics and check that for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    auto CurScope = GetScope(*ItCurrent);
    auto ItNext = ItCurrent;
    // Advance ItNext past every declaration that shares CurScope.
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other.. but only if it is not too
    // expensive.
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(ItCurrent, ItNext))
        for (auto *J : llvm::make_range(ItCurrent, ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
7545
7546//===----------------------------------------------------------------------===//
7547// Implement the public interfaces to this file...
7548//===----------------------------------------------------------------------===//
7549
  // NOTE(review): const_cast is needed because Verifier::verify takes a
  // mutable reference; presumably verification does not modify F — confirm.
  Function &F = const_cast<Function &>(f);

  // Don't use a raw_null_ostream. Printing IR is expensive.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());

  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return !V.verify(F);
}
7560
                       bool *BrokenDebugInfo) {
  // Don't use a raw_null_ostream. Printing IR is expensive.
  // If the caller asked for debug-info breakage to be reported separately,
  // do not treat it as a fatal verification error.
  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);

  // Verify every function, then the module-level invariants.
  bool Broken = false;
  for (const Function &F : M)
    Broken |= !V.verify(F);

  Broken |= !V.verify();
  if (BrokenDebugInfo)
    *BrokenDebugInfo = V.hasBrokenDebugInfo();
  // Note that this function's return value is inverted from what you would
  // expect of a function called "verify".
  return Broken;
}
7577
7578namespace {
7579
// Legacy pass-manager wrapper around Verifier. Runs per-function during the
// pipeline and finishes module-level checks in doFinalization.
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  // Constructed lazily in doInitialization; broken debug info is non-fatal
  // here (see the Verifier construction below).
  std::unique_ptr<Verifier> V;
  // When true, any verification failure aborts compilation.
  bool FatalErrors = true;

  VerifierLegacyPass() : FunctionPass(ID) {
  }
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID),
        FatalErrors(FatalErrors) {
  }

  bool doInitialization(Module &M) override {
    V = std::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false;
  }

  bool runOnFunction(Function &F) override {
    // Abort on the first broken function when errors are fatal.
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false;
  }

  bool doFinalization(Module &M) override {
    bool HasErrors = false;
    // Definitions were already verified in runOnFunction; presumably only
    // declarations remain to be checked here, plus the module-level checks.
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Verification never mutates the IR.
    AU.setPreservesAll();
  }
};
7625
7626} // end anonymous namespace
7627
7628/// Helper to issue failure from the TBAA verification
7629template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7630 if (Diagnostic)
7631 return Diagnostic->CheckFailed(Args...);
7632}
7633
7634#define CheckTBAA(C, ...) \
7635 do { \
7636 if (!(C)) { \
7637 CheckFailed(__VA_ARGS__); \
7638 return false; \
7639 } \
7640 } while (false)
7641
7642/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7643/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7644/// struct-type node describing an aggregate data structure (like a struct).
7645TBAAVerifier::TBAABaseNodeSummary
7646TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7647 bool IsNewFormat) {
7648 if (BaseNode->getNumOperands() < 2) {
7649 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7650 return {true, ~0u};
7651 }
7652
7653 auto Itr = TBAABaseNodes.find(BaseNode);
7654 if (Itr != TBAABaseNodes.end())
7655 return Itr->second;
7656
7657 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7658 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7659 (void)InsertResult;
7660 assert(InsertResult.second && "We just checked!");
7661 return Result;
7662}
7663
7664TBAAVerifier::TBAABaseNodeSummary
7665TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7666 bool IsNewFormat) {
7667 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7668
7669 if (BaseNode->getNumOperands() == 2) {
7670 // Scalar nodes can only be accessed at offset 0.
7671 return isValidScalarTBAANode(BaseNode)
7672 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7673 : InvalidNode;
7674 }
7675
7676 if (IsNewFormat) {
7677 if (BaseNode->getNumOperands() % 3 != 0) {
7678 CheckFailed("Access tag nodes must have the number of operands that is a "
7679 "multiple of 3!", BaseNode);
7680 return InvalidNode;
7681 }
7682 } else {
7683 if (BaseNode->getNumOperands() % 2 != 1) {
7684 CheckFailed("Struct tag nodes must have an odd number of operands!",
7685 BaseNode);
7686 return InvalidNode;
7687 }
7688 }
7689
7690 // Check the type size field.
7691 if (IsNewFormat) {
7692 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7693 BaseNode->getOperand(1));
7694 if (!TypeSizeNode) {
7695 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7696 return InvalidNode;
7697 }
7698 }
7699
7700 // Check the type name field. In the new format it can be anything.
7701 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7702 CheckFailed("Struct tag nodes have a string as their first operand",
7703 BaseNode);
7704 return InvalidNode;
7705 }
7706
7707 bool Failed = false;
7708
7709 std::optional<APInt> PrevOffset;
7710 unsigned BitWidth = ~0u;
7711
7712 // We've already checked that BaseNode is not a degenerate root node with one
7713 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7714 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7715 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7716 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7717 Idx += NumOpsPerField) {
7718 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7719 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7720 if (!isa<MDNode>(FieldTy)) {
7721 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7722 Failed = true;
7723 continue;
7724 }
7725
7726 auto *OffsetEntryCI =
7728 if (!OffsetEntryCI) {
7729 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7730 Failed = true;
7731 continue;
7732 }
7733
7734 if (BitWidth == ~0u)
7735 BitWidth = OffsetEntryCI->getBitWidth();
7736
7737 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7738 CheckFailed(
7739 "Bitwidth between the offsets and struct type entries must match", &I,
7740 BaseNode);
7741 Failed = true;
7742 continue;
7743 }
7744
7745 // NB! As far as I can tell, we generate a non-strictly increasing offset
7746 // sequence only from structs that have zero size bit fields. When
7747 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7748 // pick the field lexically the latest in struct type metadata node. This
7749 // mirrors the actual behavior of the alias analysis implementation.
7750 bool IsAscending =
7751 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7752
7753 if (!IsAscending) {
7754 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7755 Failed = true;
7756 }
7757
7758 PrevOffset = OffsetEntryCI->getValue();
7759
7760 if (IsNewFormat) {
7761 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7762 BaseNode->getOperand(Idx + 2));
7763 if (!MemberSizeNode) {
7764 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7765 Failed = true;
7766 continue;
7767 }
7768 }
7769 }
7770
7771 return Failed ? InvalidNode
7772 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7773}
7774
7775static bool IsRootTBAANode(const MDNode *MD) {
7776 return MD->getNumOperands() < 2;
7777}
7778
7779static bool IsScalarTBAANodeImpl(const MDNode *MD,
7781 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7782 return false;
7783
7784 if (!isa<MDString>(MD->getOperand(0)))
7785 return false;
7786
7787 if (MD->getNumOperands() == 3) {
7789 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7790 return false;
7791 }
7792
7793 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7794 return Parent && Visited.insert(Parent).second &&
7795 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7796}
7797
7798bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7799 auto ResultIt = TBAAScalarNodes.find(MD);
7800 if (ResultIt != TBAAScalarNodes.end())
7801 return ResultIt->second;
7802
7803 SmallPtrSet<const MDNode *, 4> Visited;
7804 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7805 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7806 (void)InsertResult;
7807 assert(InsertResult.second && "Just checked!");
7808
7809 return Result;
7810}
7811
7812/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7813/// Offset in place to be the offset within the field node returned.
7814///
7815/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7816MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7817 const MDNode *BaseNode,
7818 APInt &Offset,
7819 bool IsNewFormat) {
7820 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7821
7822 // Scalar nodes have only one possible "field" -- their parent in the access
7823 // hierarchy. Offset must be zero at this point, but our caller is supposed
7824 // to check that.
7825 if (BaseNode->getNumOperands() == 2)
7826 return cast<MDNode>(BaseNode->getOperand(1));
7827
7828 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7829 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7830 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7831 Idx += NumOpsPerField) {
7832 auto *OffsetEntryCI =
7833 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7834 if (OffsetEntryCI->getValue().ugt(Offset)) {
7835 if (Idx == FirstFieldOpNo) {
7836 CheckFailed("Could not find TBAA parent in struct type node", &I,
7837 BaseNode, &Offset);
7838 return nullptr;
7839 }
7840
7841 unsigned PrevIdx = Idx - NumOpsPerField;
7842 auto *PrevOffsetEntryCI =
7843 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7844 Offset -= PrevOffsetEntryCI->getValue();
7845 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7846 }
7847 }
7848
7849 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7850 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7851 BaseNode->getOperand(LastIdx + 1));
7852 Offset -= LastOffsetEntryCI->getValue();
7853 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7854}
7855
7857 if (!Type || Type->getNumOperands() < 3)
7858 return false;
7859
7860 // In the new format type nodes shall have a reference to the parent type as
7861 // its first operand.
7862 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7863}
7864
7866 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7867 &I, MD);
7868
7872 "This instruction shall not have a TBAA access tag!", &I);
7873
7874 bool IsStructPathTBAA =
7875 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7876
7877 CheckTBAA(IsStructPathTBAA,
7878 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7879 &I);
7880
7881 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7882 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7883
7884 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7885
7886 if (IsNewFormat) {
7887 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7888 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7889 } else {
7890 CheckTBAA(MD->getNumOperands() < 5,
7891 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7892 }
7893
7894 // Check the access size field.
7895 if (IsNewFormat) {
7896 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7897 MD->getOperand(3));
7898 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7899 }
7900
7901 // Check the immutability flag.
7902 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7903 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7904 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7905 MD->getOperand(ImmutabilityFlagOpNo));
7906 CheckTBAA(IsImmutableCI,
7907 "Immutability tag on struct tag metadata must be a constant", &I,
7908 MD);
7909 CheckTBAA(
7910 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7911 "Immutability part of the struct tag metadata must be either 0 or 1",
7912 &I, MD);
7913 }
7914
7915 CheckTBAA(BaseNode && AccessType,
7916 "Malformed struct tag metadata: base and access-type "
7917 "should be non-null and point to Metadata nodes",
7918 &I, MD, BaseNode, AccessType);
7919
7920 if (!IsNewFormat) {
7921 CheckTBAA(isValidScalarTBAANode(AccessType),
7922 "Access type node must be a valid scalar type", &I, MD,
7923 AccessType);
7924 }
7925
7927 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7928
7929 APInt Offset = OffsetCI->getValue();
7930 bool SeenAccessTypeInPath = false;
7931
7932 SmallPtrSet<MDNode *, 4> StructPath;
7933
7934 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7935 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7936 IsNewFormat)) {
7937 if (!StructPath.insert(BaseNode).second) {
7938 CheckFailed("Cycle detected in struct path", &I, MD);
7939 return false;
7940 }
7941
7942 bool Invalid;
7943 unsigned BaseNodeBitWidth;
7944 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7945 IsNewFormat);
7946
7947 // If the base node is invalid in itself, then we've already printed all the
7948 // errors we wanted to print.
7949 if (Invalid)
7950 return false;
7951
7952 SeenAccessTypeInPath |= BaseNode == AccessType;
7953
7954 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7955 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7956 &I, MD, &Offset);
7957
7958 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7959 (BaseNodeBitWidth == 0 && Offset == 0) ||
7960 (IsNewFormat && BaseNodeBitWidth == ~0u),
7961 "Access bit-width not the same as description bit-width", &I, MD,
7962 BaseNodeBitWidth, Offset.getBitWidth());
7963
7964 if (IsNewFormat && SeenAccessTypeInPath)
7965 break;
7966 }
7967
7968 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7969 MD);
7970 return true;
7971}
7972
// Pass identification token: the legacy pass manager identifies passes by
// the *address* of this member, so the value itself is irrelevant.
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7975
7977 return new VerifierLegacyPass(FatalErrors);
7978}
7979
// Unique key that identifies VerifierAnalysis to the new pass manager's
// analysis caching machinery (identified by address, like pass IDs).
AnalysisKey VerifierAnalysis::Key;
7987
7992
7994 auto Res = AM.getResult<VerifierAnalysis>(M);
7995 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7996 report_fatal_error("Broken module found, compilation aborted!");
7997
7998 return PreservedAnalyses::all();
7999}
8000
8002 auto res = AM.getResult<VerifierAnalysis>(F);
8003 if (res.IRBroken && FatalErrors)
8004 report_fatal_error("Broken function found, compilation aborted!");
8005
8006 return PreservedAnalyses::all();
8007}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:678
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:719
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1459
bool isNegative() const
Definition APFloat.h:1449
const fltSemantics & getSemantics() const
Definition APFloat.h:1457
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1201
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:417
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:399
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:784
bool isIntPredicate() const
Definition InstrTypes.h:785
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI Function * getFunction()
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
bool empty() const
Definition DenseMap.h:107
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:652
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:316
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1445
bool isTemporary() const
Definition Metadata.h:1261
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1443
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1451
bool isDistinct() const
Definition Metadata.h:1260
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1257
LLVMContext & getContext() const
Definition Metadata.h:1241
bool equalsStr(StringRef Str) const
Definition Metadata.h:921
Metadata * get() const
Definition Metadata.h:928
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:617
static LLVM_ABI bool isTagMD(const Metadata *MD)
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:111
Metadata * getMetadata() const
Definition Metadata.h:200
Root of the metadata hierarchy.
Definition Metadata.h:63
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:103
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
iterator_range< op_iterator > operands()
Definition Metadata.h:1849
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:480
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:269
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:151
static constexpr size_t npos
Definition StringRef.h:57
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
LLVM_ABI bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1058
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:497
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:108
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:804
@ DW_MACINFO_start_file
Definition Dwarf.h:805
@ DW_MACINFO_define
Definition Dwarf.h:803
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:707
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:694
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:666
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:310
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:262
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1707
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:833
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2454
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2118
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1632
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:24
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1879
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:85
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * BranchWeights
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:304
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:297
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:286
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:313
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144