95#define DEBUG_TYPE "asan"
101 std::numeric_limits<uint64_t>::max();
142 "__asan_unregister_image_globals";
155 "__asan_stack_malloc_always_";
169 "__asan_option_detect_stack_use_after_return";
172 "__asan_shadow_memory_dynamic_address";
198 "asan-kernel",
cl::desc(
"Enable KernelAddressSanitizer instrumentation"),
203 cl::desc(
"Enable recovery mode (continue-after-error)."),
207 "asan-guard-against-version-mismatch",
213 cl::desc(
"instrument read instructions"),
217 "asan-instrument-writes",
cl::desc(
"instrument write instructions"),
226 "asan-instrument-atomics",
236 "asan-always-slow-path",
241 "asan-force-dynamic-shadow",
242 cl::desc(
"Load shadow address into a local variable for each function"),
247 cl::desc(
"Access dynamic shadow through an ifunc global on "
248 "platforms that support this"),
252 "asan-with-ifunc-suppress-remat",
253 cl::desc(
"Suppress rematerialization of dynamic shadow address by passing "
254 "it through inline asm in prologue."),
262 "asan-max-ins-per-bb",
cl::init(10000),
263 cl::desc(
"maximal number of instructions to instrument in any given BB"),
270 "asan-max-inline-poisoning-size",
272 "Inline shadow poisoning for blocks up to the given size in bytes."),
276 "asan-use-after-return",
277 cl::desc(
"Sets the mode of detection for stack-use-after-return."),
280 "Never detect stack use after return."),
283 "Detect stack use after return if "
284 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
286 "Always detect stack use after return.")),
290 cl::desc(
"Create redzones for byval "
291 "arguments (extra copy "
296 cl::desc(
"Check stack-use-after-scope"),
305 cl::desc(
"Handle C++ initializer order"),
309 "asan-detect-invalid-pointer-pair",
314 "asan-detect-invalid-pointer-cmp",
319 "asan-detect-invalid-pointer-sub",
324 "asan-realign-stack",
325 cl::desc(
"Realign stack to the value of this flag (power of two)"),
329 "asan-instrumentation-with-call-threshold",
330 cl::desc(
"If the function being instrumented contains more than "
331 "this number of memory accesses, use callbacks instead of "
332 "inline checks (-1 means never use callbacks)."),
336 "asan-memory-access-callback-prefix",
341 "asan-kernel-mem-intrinsic-prefix",
347 cl::desc(
"instrument dynamic allocas"),
351 "asan-skip-promotable-allocas",
356 "asan-constructor-kind",
357 cl::desc(
"Sets the ASan constructor kind"),
360 "Use global constructors")),
367 cl::desc(
"scale of asan shadow mapping"),
372 cl::desc(
"offset of asan shadow mapping [EXPERIMENTAL]"),
386 "asan-opt-same-temp",
cl::desc(
"Instrument the same temp just once"),
390 cl::desc(
"Don't instrument scalar globals"),
394 "asan-opt-stack",
cl::desc(
"Don't instrument scalar stack variables"),
398 "asan-stack-dynamic-alloca",
403 "asan-force-experiment",
409 cl::desc(
"Use private aliases for global variables"),
414 cl::desc(
"Use odr indicators to improve ODR reporting"),
419 cl::desc(
"Use linker features to support dead "
420 "code stripping of globals"),
427 cl::desc(
"Place ASan constructors in comdat sections"),
431 "asan-destructor-kind",
432 cl::desc(
"Sets the ASan destructor kind. The default is to use the value "
433 "provided to the pass constructor"),
436 "Use global destructors")),
456STATISTIC(NumInstrumentedReads,
"Number of instrumented reads");
457STATISTIC(NumInstrumentedWrites,
"Number of instrumented writes");
459 "Number of optimized accesses to global vars");
461 "Number of optimized accesses to stack vars");
470struct ShadowMapping {
481 bool IsAndroid = TargetTriple.
isAndroid();
484 bool IsMacOS = TargetTriple.
isMacOSX();
487 bool IsPS = TargetTriple.
isPS();
493 bool IsMIPSN32ABI = TargetTriple.
isABIN32();
494 bool IsMIPS32 = TargetTriple.
isMIPS32();
495 bool IsMIPS64 = TargetTriple.
isMIPS64();
496 bool IsArmOrThumb = TargetTriple.
isARM() || TargetTriple.
isThumb();
503 bool IsAMDGPU = TargetTriple.
isAMDGPU();
505 bool IsWasm = TargetTriple.
isWasm();
507 ShadowMapping Mapping;
514 if (LongSize == 32) {
517 else if (IsMIPSN32ABI)
542 else if (IsFreeBSD && IsAArch64)
544 else if (IsFreeBSD && !IsMIPS64) {
549 }
else if (IsNetBSD) {
556 else if (IsLinux && IsX86_64) {
562 }
else if (IsWindows && IsX86_64) {
568 else if (IsMacOS && IsAArch64)
572 else if (IsLoongArch64)
579 else if (IsHaiku && IsX86_64)
599 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
600 !IsRISCV64 && !IsLoongArch64 &&
601 !(Mapping.Offset & (Mapping.Offset - 1)) &&
603 Mapping.InGlobal =
ClWithIfunc && IsAndroid && IsArmOrThumb;
611 int *MappingScale,
bool *OrShadowOffset) {
613 *ShadowBase = Mapping.Offset;
614 *MappingScale = Mapping.Scale;
615 *OrShadowOffset = Mapping.OrShadowOffset;
634 if (!
F.doesNotAccessMemory()) {
635 bool WritesMemory = !
F.onlyReadsMemory();
636 bool ReadsMemory = !
F.onlyWritesMemory();
637 if ((WritesMemory && !ReadsMemory) ||
F.onlyAccessesArgMemory()) {
638 F.removeFnAttr(Attribute::Memory);
644 if (
A.hasAttribute(Attribute::WriteOnly)) {
645 A.removeAttr(Attribute::WriteOnly);
653 F.addFnAttr(Attribute::NoBuiltin);
676 return std::max(32U, 1U << MappingScale);
695class RuntimeCallInserter {
697 bool TrackInsertedCalls =
false;
701 RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
703 auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
704 if (isScopedEHPersonality(Personality))
705 TrackInsertedCalls = true;
709 ~RuntimeCallInserter() {
710 if (InsertedCalls.empty())
712 assert(TrackInsertedCalls &&
"Calls were wrongly tracked");
714 DenseMap<BasicBlock *, ColorVector> BlockColors =
colorEHFunclets(*OwnerFn);
715 for (CallInst *CI : InsertedCalls) {
717 assert(BB &&
"Instruction doesn't belong to a BasicBlock");
719 "Instruction doesn't belong to the expected Function!");
727 if (Colors.
size() != 1) {
728 OwnerFn->getContext().emitError(
729 "Instruction's BasicBlock is not monochromatic");
736 if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
740 OB, CI->getIterator());
741 NewCall->copyMetadata(*CI);
742 CI->replaceAllUsesWith(NewCall);
743 CI->eraseFromParent();
748 CallInst *createRuntimeCall(
IRBuilder<> &IRB, FunctionCallee Callee,
750 const Twine &
Name =
"") {
753 CallInst *Inst = IRB.
CreateCall(Callee, Args, Name,
nullptr);
754 if (TrackInsertedCalls)
755 InsertedCalls.push_back(Inst);
761struct AddressSanitizer {
762 AddressSanitizer(
Module &M,
const StackSafetyGlobalInfo *SSGI,
763 int InstrumentationWithCallsThreshold,
764 uint32_t MaxInlinePoisoningSize,
bool CompileKernel =
false,
765 bool Recover =
false,
bool UseAfterScope =
false,
767 AsanDetectStackUseAfterReturnMode::Runtime)
776 InstrumentationWithCallsThreshold(
779 : InstrumentationWithCallsThreshold),
782 : MaxInlinePoisoningSize) {
783 C = &(M.getContext());
784 DL = &M.getDataLayout();
785 LongSize = M.getDataLayout().getPointerSizeInBits();
786 IntptrTy = Type::getIntNTy(*C, LongSize);
787 PtrTy = PointerType::getUnqual(*C);
788 Int32Ty = Type::getInt32Ty(*C);
789 TargetTriple = M.getTargetTriple();
793 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
801 bool isInterestingAlloca(
const AllocaInst &AI);
803 bool ignoreAccess(Instruction *Inst,
Value *
Ptr);
805 Instruction *
I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
806 const TargetTransformInfo *
TTI);
808 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
809 InterestingMemoryOperand &O,
bool UseCalls,
810 const DataLayout &DL, RuntimeCallInserter &RTCI);
811 void instrumentPointerComparisonOrSubtraction(Instruction *
I,
812 RuntimeCallInserter &RTCI);
814 Value *Addr, MaybeAlign Alignment,
815 uint32_t TypeStoreSize,
bool IsWrite,
816 Value *SizeArgument,
bool UseCalls, uint32_t Exp,
817 RuntimeCallInserter &RTCI);
818 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
819 Instruction *InsertBefore,
Value *Addr,
820 uint32_t TypeStoreSize,
bool IsWrite,
821 Value *SizeArgument);
824 void instrumentUnusualSizeOrAlignment(Instruction *
I,
825 Instruction *InsertBefore,
Value *Addr,
826 TypeSize TypeStoreSize,
bool IsWrite,
827 Value *SizeArgument,
bool UseCalls,
829 RuntimeCallInserter &RTCI);
830 void instrumentMaskedLoadOrStore(AddressSanitizer *
Pass,
const DataLayout &DL,
833 MaybeAlign Alignment,
unsigned Granularity,
834 Type *OpType,
bool IsWrite,
835 Value *SizeArgument,
bool UseCalls,
836 uint32_t Exp, RuntimeCallInserter &RTCI);
838 Value *ShadowValue, uint32_t TypeStoreSize);
840 bool IsWrite,
size_t AccessSizeIndex,
841 Value *SizeArgument, uint32_t Exp,
842 RuntimeCallInserter &RTCI);
843 void instrumentMemIntrinsic(MemIntrinsic *
MI, RuntimeCallInserter &RTCI);
845 bool suppressInstrumentationSiteForDebug(
int &Instrumented);
846 bool instrumentFunction(Function &
F,
const TargetLibraryInfo *TLI,
847 const TargetTransformInfo *
TTI);
848 bool maybeInsertAsanInitAtFunctionEntry(Function &
F);
849 bool maybeInsertDynamicShadowAtFunctionEntry(Function &
F);
850 void markEscapedLocalAllocas(Function &
F);
853 friend struct FunctionStackPoisoner;
855 void initializeCallbacks(
const TargetLibraryInfo *TLI);
857 bool LooksLikeCodeInBug11395(Instruction *
I);
858 bool GlobalIsLinkerInitialized(GlobalVariable *
G);
859 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
Value *Addr,
860 TypeSize TypeStoreSize)
const;
863 struct FunctionStateRAII {
864 AddressSanitizer *Pass;
866 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
867 assert(Pass->ProcessedAllocas.empty() &&
868 "last pass forgot to clear cache");
869 assert(!Pass->LocalDynamicShadow);
872 ~FunctionStateRAII() {
873 Pass->LocalDynamicShadow =
nullptr;
874 Pass->ProcessedAllocas.clear();
880 const DataLayout *DL;
890 ShadowMapping Mapping;
891 FunctionCallee AsanHandleNoReturnFunc;
892 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
900 FunctionCallee AsanErrorCallbackSized[2][2];
901 FunctionCallee AsanMemoryAccessCallbackSized[2][2];
903 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
904 Value *LocalDynamicShadow =
nullptr;
905 const StackSafetyGlobalInfo *SSGI;
906 DenseMap<const AllocaInst *, bool> ProcessedAllocas;
908 FunctionCallee AMDGPUAddressShared;
909 FunctionCallee AMDGPUAddressPrivate;
910 int InstrumentationWithCallsThreshold;
911 uint32_t MaxInlinePoisoningSize;
914class ModuleAddressSanitizer {
916 ModuleAddressSanitizer(
Module &M,
bool InsertVersionCheck,
917 bool CompileKernel =
false,
bool Recover =
false,
918 bool UseGlobalsGC =
true,
bool UseOdrIndicator =
true,
926 : InsertVersionCheck),
928 UseGlobalsGC(UseGlobalsGC &&
ClUseGlobalsGC && !this->CompileKernel),
943 UseCtorComdat(UseGlobalsGC &&
ClWithComdat && !this->CompileKernel),
944 DestructorKind(DestructorKind),
948 C = &(M.getContext());
949 int LongSize = M.getDataLayout().getPointerSizeInBits();
950 IntptrTy = Type::getIntNTy(*C, LongSize);
951 PtrTy = PointerType::getUnqual(*C);
952 TargetTriple = M.getTargetTriple();
957 assert(this->DestructorKind != AsanDtorKind::Invalid);
960 bool instrumentModule();
963 void initializeCallbacks();
965 void instrumentGlobals(
IRBuilder<> &IRB,
bool *CtorComdat);
972 const std::string &UniqueModuleId);
977 InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB,
981 GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
982 StringRef OriginalName);
983 void SetComdatForGlobalMetadata(GlobalVariable *
G, GlobalVariable *
Metadata,
984 StringRef InternalSuffix);
987 const GlobalVariable *getExcludedAliasedGlobal(
const GlobalAlias &GA)
const;
988 bool shouldInstrumentGlobal(GlobalVariable *
G)
const;
989 bool ShouldUseMachOGlobalsSection()
const;
990 StringRef getGlobalMetadataSection()
const;
991 void poisonOneInitializer(Function &GlobalInit);
992 void createInitializerPoisonCalls();
993 uint64_t getMinRedzoneSizeForGlobal()
const {
997 int GetAsanVersion()
const;
998 GlobalVariable *getOrCreateModuleName();
1002 bool InsertVersionCheck;
1005 bool UsePrivateAlias;
1006 bool UseOdrIndicator;
1013 Triple TargetTriple;
1014 ShadowMapping Mapping;
1015 FunctionCallee AsanPoisonGlobals;
1016 FunctionCallee AsanUnpoisonGlobals;
1017 FunctionCallee AsanRegisterGlobals;
1018 FunctionCallee AsanUnregisterGlobals;
1019 FunctionCallee AsanRegisterImageGlobals;
1020 FunctionCallee AsanUnregisterImageGlobals;
1021 FunctionCallee AsanRegisterElfGlobals;
1022 FunctionCallee AsanUnregisterElfGlobals;
1024 Function *AsanCtorFunction =
nullptr;
1025 Function *AsanDtorFunction =
nullptr;
1026 GlobalVariable *ModuleName =
nullptr;
1038struct FunctionStackPoisoner :
public InstVisitor<FunctionStackPoisoner> {
1040 AddressSanitizer &ASan;
1041 RuntimeCallInserter &RTCI;
1046 ShadowMapping Mapping;
1050 SmallVector<Instruction *, 8> RetVec;
1054 FunctionCallee AsanSetShadowFunc[0x100] = {};
1055 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1056 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1059 struct AllocaPoisonCall {
1060 IntrinsicInst *InsBefore;
1070 AllocaInst *DynamicAllocaLayout =
nullptr;
1071 IntrinsicInst *LocalEscapeCall =
nullptr;
1073 bool HasInlineAsm =
false;
1074 bool HasReturnsTwiceCall =
false;
1077 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1078 RuntimeCallInserter &RTCI)
1079 : F(F), ASan(ASan), RTCI(RTCI),
1081 IntptrTy(ASan.IntptrTy),
1083 Mapping(ASan.Mapping),
1091 copyArgsPassedByValToAllocas();
1096 if (AllocaVec.empty() && DynamicAllocaVec.empty())
return false;
1098 initializeCallbacks(*F.getParent());
1100 processDynamicAllocas();
1101 processStaticAllocas();
1112 void copyArgsPassedByValToAllocas();
1117 void processStaticAllocas();
1118 void processDynamicAllocas();
1120 void createDynamicAllocasInitStorage();
1125 void visitReturnInst(ReturnInst &RI) {
1126 if (CallInst *CI = RI.
getParent()->getTerminatingMustTailCall())
1127 RetVec.push_back(CI);
1129 RetVec.push_back(&RI);
1133 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1136 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1138 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1139 Value *SavedStack) {
1148 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1154 RTCI.createRuntimeCall(
1155 IRB, AsanAllocasUnpoisonFunc,
1156 {IRB.
CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1160 void unpoisonDynamicAllocas() {
1161 for (Instruction *Ret : RetVec)
1162 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1164 for (Instruction *StackRestoreInst : StackRestoreVec)
1165 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1166 StackRestoreInst->getOperand(0));
1179 void handleDynamicAllocaCall(AllocaInst *AI);
1182 void visitAllocaInst(AllocaInst &AI) {
1187 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1191 if (AllocaVec.empty())
1194 StaticAllocasToMoveUp.push_back(&AI);
1200 DynamicAllocaVec.push_back(&AI);
1202 AllocaVec.push_back(&AI);
1207 void visitIntrinsicInst(IntrinsicInst &
II) {
1209 if (
ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&
II);
1210 if (
ID == Intrinsic::localescape) LocalEscapeCall = &
II;
1211 if (!ASan.UseAfterScope)
1213 if (!
II.isLifetimeStartOrEnd())
1218 if (!AI || !ASan.isInterestingAlloca(*AI))
1228 bool DoPoison = (
ID == Intrinsic::lifetime_end);
1229 AllocaPoisonCall APC = {&
II, AI, *
Size, DoPoison};
1231 StaticAllocaPoisonCallVec.push_back(APC);
1233 DynamicAllocaPoisonCallVec.push_back(APC);
1236 void visitCallBase(CallBase &CB) {
1238 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1239 HasReturnsTwiceCall |= CI->canReturnTwice();
1244 void initializeCallbacks(
Module &M);
1249 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1251 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1254 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1255 ArrayRef<uint8_t> ShadowBytes,
size_t Begin,
1260 Value *createAllocaForLayout(
IRBuilder<> &IRB,
const ASanStackFrameLayout &L,
1263 Instruction *ThenTerm,
Value *ValueIfFalse);
1271 OS, MapClassName2PassName);
1273 if (Options.CompileKernel)
1275 if (Options.UseAfterScope)
1276 OS <<
"use-after-scope";
1284 : Options(Options), UseGlobalGC(UseGlobalGC),
1285 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1286 ConstructorKind(ConstructorKind) {}
1295 ModuleAddressSanitizer ModuleSanitizer(
1296 M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
1297 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1309 if (
F.getName().starts_with(
"__asan_"))
1311 if (
F.isPresplitCoroutine())
1313 AddressSanitizer FunctionSanitizer(
1314 M, SSGI, Options.InstrumentationWithCallsThreshold,
1315 Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
1316 Options.UseAfterScope, Options.UseAfterReturn);
1319 Modified |= FunctionSanitizer.instrumentFunction(
F, &TLI, &
TTI);
1321 Modified |= ModuleSanitizer.instrumentModule();
1342 if (
G->getName().starts_with(
"llvm.") ||
1344 G->getName().starts_with(
"__llvm_gcov_ctr") ||
1346 G->getName().starts_with(
"__llvm_rtti_proxy"))
1361 if (AddrSpace == 3 || AddrSpace == 5)
1368 Shadow = IRB.
CreateLShr(Shadow, Mapping.Scale);
1369 if (Mapping.Offset == 0)
return Shadow;
1372 if (LocalDynamicShadow)
1373 ShadowBase = LocalDynamicShadow;
1375 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1376 if (Mapping.OrShadowOffset)
1377 return IRB.
CreateOr(Shadow, ShadowBase);
1379 return IRB.
CreateAdd(Shadow, ShadowBase);
1384 RuntimeCallInserter &RTCI) {
1387 RTCI.createRuntimeCall(
1393 RTCI.createRuntimeCall(
1399 MI->eraseFromParent();
1403bool AddressSanitizer::isInterestingAlloca(
const AllocaInst &AI) {
1404 auto [It,
Inserted] = ProcessedAllocas.try_emplace(&AI);
1407 return It->getSecond();
1409 bool IsInteresting =
1422 !(SSGI && SSGI->
isSafe(AI)));
1424 It->second = IsInteresting;
1425 return IsInteresting;
1439 if (
Ptr->isSwiftError())
1456void AddressSanitizer::getInterestingMemoryOperands(
1460 if (LocalDynamicShadow ==
I)
1466 Interesting.
emplace_back(
I, LI->getPointerOperandIndex(),
false,
1467 LI->getType(), LI->getAlign());
1472 SI->getValueOperand()->getType(),
SI->getAlign());
1476 Interesting.
emplace_back(
I, RMW->getPointerOperandIndex(),
true,
1477 RMW->getValOperand()->getType(), std::nullopt);
1481 Interesting.
emplace_back(
I, XCHG->getPointerOperandIndex(),
true,
1482 XCHG->getCompareOperand()->getType(),
1485 switch (CI->getIntrinsicID()) {
1486 case Intrinsic::masked_load:
1487 case Intrinsic::masked_store:
1488 case Intrinsic::masked_gather:
1489 case Intrinsic::masked_scatter: {
1490 bool IsWrite = CI->getType()->isVoidTy();
1492 unsigned OpOffset = IsWrite ? 1 : 0;
1496 auto BasePtr = CI->getOperand(OpOffset);
1497 if (ignoreAccess(
I, BasePtr))
1499 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1503 Alignment =
Op->getMaybeAlignValue();
1504 Value *
Mask = CI->getOperand(2 + OpOffset);
1505 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, Mask);
1508 case Intrinsic::masked_expandload:
1509 case Intrinsic::masked_compressstore: {
1510 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1511 unsigned OpOffset = IsWrite ? 1 : 0;
1514 auto BasePtr = CI->getOperand(OpOffset);
1515 if (ignoreAccess(
I, BasePtr))
1518 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1521 Value *
Mask = CI->getOperand(1 + OpOffset);
1524 Value *ExtMask =
IB.CreateZExt(Mask, ExtTy);
1525 Value *EVL =
IB.CreateAddReduce(ExtMask);
1526 Value *TrueMask = ConstantInt::get(
Mask->getType(), 1);
1527 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1531 case Intrinsic::vp_load:
1532 case Intrinsic::vp_store:
1533 case Intrinsic::experimental_vp_strided_load:
1534 case Intrinsic::experimental_vp_strided_store: {
1536 unsigned IID = CI->getIntrinsicID();
1537 bool IsWrite = CI->getType()->isVoidTy();
1540 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1541 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1542 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*
DL);
1543 Value *Stride =
nullptr;
1544 if (IID == Intrinsic::experimental_vp_strided_store ||
1545 IID == Intrinsic::experimental_vp_strided_load) {
1546 Stride = VPI->getOperand(PtrOpNo + 1);
1553 Alignment =
Align(1);
1555 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1556 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1560 case Intrinsic::vp_gather:
1561 case Intrinsic::vp_scatter: {
1563 unsigned IID = CI->getIntrinsicID();
1564 bool IsWrite = IID == Intrinsic::vp_scatter;
1567 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1568 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1569 MaybeAlign Alignment = VPI->getPointerAlignment();
1570 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1571 VPI->getMaskParam(),
1572 VPI->getVectorLengthParam());
1578 if (
TTI->getTgtMemIntrinsic(
II, IntrInfo))
1582 for (
unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1584 ignoreAccess(
I, CI->getArgOperand(ArgNo)))
1586 Type *Ty = CI->getParamByValType(ArgNo);
1602 if (!Cmp->isRelational())
1616 if (BO->getOpcode() != Instruction::Sub)
1629 if (!
G->hasInitializer())
1632 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().IsDynInit)
1638void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1642 Value *
Param[2] = {
I->getOperand(0),
I->getOperand(1)};
1643 for (
Value *&i : Param) {
1644 if (i->getType()->isPointerTy())
1647 RTCI.createRuntimeCall(IRB,
F, Param);
1653 TypeSize TypeStoreSize,
bool IsWrite,
1654 Value *SizeArgument,
bool UseCalls,
1655 uint32_t Exp, RuntimeCallInserter &RTCI) {
1660 switch (FixedSize) {
1666 if (!Alignment || *Alignment >= Granularity ||
1667 *Alignment >= FixedSize / 8)
1668 return Pass->instrumentAddress(
I, InsertBefore, Addr, Alignment,
1669 FixedSize, IsWrite,
nullptr, UseCalls,
1673 Pass->instrumentUnusualSizeOrAlignment(
I, InsertBefore, Addr, TypeStoreSize,
1674 IsWrite,
nullptr, UseCalls, Exp, RTCI);
1677void AddressSanitizer::instrumentMaskedLoadOrStore(
1680 MaybeAlign Alignment,
unsigned Granularity,
Type *OpType,
bool IsWrite,
1681 Value *SizeArgument,
bool UseCalls, uint32_t Exp,
1682 RuntimeCallInserter &RTCI) {
1684 TypeSize ElemTypeSize =
DL.getTypeStoreSizeInBits(VTy->getScalarType());
1685 auto Zero = ConstantInt::get(IntptrTy, 0);
1693 Value *IsEVLZero =
IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1695 IB.SetInsertPoint(LoopInsertBefore);
1697 EVL =
IB.CreateZExtOrTrunc(EVL, IntptrTy);
1700 Value *
EC =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1701 EVL =
IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1703 EVL =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1708 Stride =
IB.CreateZExtOrTrunc(Stride, IntptrTy);
1712 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1713 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1714 if (MaskElemC->isZero())
1720 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1721 MaskElem, &*IRB.GetInsertPoint(), false);
1722 IRB.SetInsertPoint(ThenTerm);
1725 Value *InstrumentedAddress;
1728 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1729 "Expected vector of pointer.");
1730 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1731 }
else if (Stride) {
1738 Alignment, Granularity, ElemTypeSize, IsWrite,
1739 SizeArgument, UseCalls, Exp, RTCI);
1746 RuntimeCallInserter &RTCI) {
1747 Value *Addr =
O.getPtr();
1767 isSafeAccess(ObjSizeVis, Addr,
O.TypeStoreSize)) {
1768 NumOptimizedAccessesToGlobalVar++;
1776 isSafeAccess(ObjSizeVis, Addr,
O.TypeStoreSize)) {
1777 NumOptimizedAccessesToStackVar++;
1783 NumInstrumentedWrites++;
1785 NumInstrumentedReads++;
1787 if (
O.MaybeByteOffset) {
1792 if (TargetTriple.isRISCV()) {
1797 static_cast<unsigned>(LongSize)) {
1806 unsigned Granularity = 1 << Mapping.Scale;
1808 instrumentMaskedLoadOrStore(
this,
DL, IntptrTy,
O.MaybeMask,
O.MaybeEVL,
1809 O.MaybeStride,
O.getInsn(), Addr,
O.Alignment,
1810 Granularity,
O.OpType,
O.IsWrite,
nullptr,
1811 UseCalls, Exp, RTCI);
1814 Granularity,
O.TypeStoreSize,
O.IsWrite,
nullptr,
1815 UseCalls, Exp, RTCI);
1820 Value *Addr,
bool IsWrite,
1821 size_t AccessSizeIndex,
1822 Value *SizeArgument,
1824 RuntimeCallInserter &RTCI) {
1830 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1831 {Addr, SizeArgument});
1833 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1834 {Addr, SizeArgument, ExpVal});
1837 Call = RTCI.createRuntimeCall(
1838 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1840 Call = RTCI.createRuntimeCall(
1841 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1850 uint32_t TypeStoreSize) {
1851 size_t Granularity =
static_cast<size_t>(1) << Mapping.Scale;
1853 Value *LastAccessedByte =
1854 IRB.
CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1856 if (TypeStoreSize / 8 > 1)
1858 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1861 IRB.
CreateIntCast(LastAccessedByte, ShadowValue->getType(),
false);
1866Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1868 uint32_t TypeStoreSize,
bool IsWrite,
Value *SizeArgument) {
1875 return InsertBefore;
1880 Value *IsSharedOrPrivate = IRB.
CreateOr(IsShared, IsPrivate);
1882 Value *AddrSpaceZeroLanding =
1885 return InsertBefore;
1901 Trm->getParent()->setName(
"asan.report");
1912void AddressSanitizer::instrumentAddress(
Instruction *OrigIns,
1915 uint32_t TypeStoreSize,
bool IsWrite,
1916 Value *SizeArgument,
bool UseCalls,
1918 RuntimeCallInserter &RTCI) {
1919 if (TargetTriple.isAMDGPU()) {
1920 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1921 TypeStoreSize, IsWrite, SizeArgument);
1930 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1933 ConstantInt::get(
Int32Ty, AccessInfo.Packed)});
1940 RTCI.createRuntimeCall(
1941 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1943 RTCI.createRuntimeCall(
1944 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1945 {AddrLong, ConstantInt::get(IRB.
getInt32Ty(), Exp)});
1952 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1953 const uint64_t ShadowAlign =
1954 std::max<uint64_t>(Alignment.
valueOrOne().
value() >> Mapping.Scale, 1);
1959 size_t Granularity = 1ULL << Mapping.Scale;
1962 bool GenSlowPath = (
ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1964 if (TargetTriple.isAMDGCN()) {
1966 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1969 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1970 }
else if (GenSlowPath) {
1978 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1993 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
2002void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2004 TypeSize TypeStoreSize,
bool IsWrite,
Value *SizeArgument,
bool UseCalls,
2005 uint32_t Exp, RuntimeCallInserter &RTCI) {
2013 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2016 RTCI.createRuntimeCall(
2017 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2031void ModuleAddressSanitizer::poisonOneInitializer(
Function &GlobalInit) {
2037 Value *ModuleNameAddr =
2039 IRB.
CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2042 for (
auto &BB : GlobalInit)
2047void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2067 poisonOneInitializer(*
F);
2073ModuleAddressSanitizer::getExcludedAliasedGlobal(
const GlobalAlias &GA)
const {
2078 assert(CompileKernel &&
"Only expecting to be called when compiling kernel");
2090bool ModuleAddressSanitizer::shouldInstrumentGlobal(
GlobalVariable *
G)
const {
2091 Type *Ty =
G->getValueType();
2094 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().NoAddress)
2096 if (!Ty->
isSized())
return false;
2097 if (!
G->hasInitializer())
return false;
2099 if (
G->getAddressSpace() &&
2106 if (
G->isThreadLocal())
return false;
2108 if (
G->getAlign() && *
G->getAlign() > getMinRedzoneSizeForGlobal())
return false;
2114 if (!TargetTriple.isOSBinFormatCOFF()) {
2115 if (!
G->hasExactDefinition() ||
G->hasComdat())
2119 if (
G->isInterposable())
2123 if (
G->hasAvailableExternallyLinkage())
2130 switch (
C->getSelectionKind()) {
2141 if (
G->hasSection()) {
2151 if (Section ==
"llvm.metadata")
return false;
2158 if (
Section.starts_with(
".preinit_array") ||
2159 Section.starts_with(
".init_array") ||
2160 Section.starts_with(
".fini_array")) {
2166 if (TargetTriple.isOSBinFormatELF()) {
2180 if (TargetTriple.isOSBinFormatCOFF() &&
Section.contains(
'$')) {
2181 LLVM_DEBUG(
dbgs() <<
"Ignoring global in sorted section (contains '$'): "
2186 if (TargetTriple.isOSBinFormatMachO()) {
2188 unsigned TAA = 0, StubSize = 0;
2191 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2196 if (ParsedSegment ==
"__OBJC" ||
2197 (ParsedSegment ==
"__DATA" && ParsedSection.
starts_with(
"__objc_"))) {
2209 if (ParsedSegment ==
"__DATA" && ParsedSection ==
"__cfstring") {
2222 if (CompileKernel) {
2225 if (
G->getName().starts_with(
"__"))
2235bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection()
const {
2236 if (!TargetTriple.isOSBinFormatMachO())
2239 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2241 if (TargetTriple.isiOS() && !TargetTriple.isOSVersionLT(9))
2243 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2245 if (TargetTriple.isDriverKit())
2247 if (TargetTriple.isXROS())
2253StringRef ModuleAddressSanitizer::getGlobalMetadataSection()
const {
2254 switch (TargetTriple.getObjectFormat()) {
2264 "ModuleAddressSanitizer not implemented for object file format");
2271void ModuleAddressSanitizer::initializeCallbacks() {
2277 AsanUnpoisonGlobals =
2281 AsanRegisterGlobals =
M.getOrInsertFunction(
2283 AsanUnregisterGlobals =
M.getOrInsertFunction(
2288 AsanRegisterImageGlobals =
M.getOrInsertFunction(
2290 AsanUnregisterImageGlobals =
M.getOrInsertFunction(
2293 AsanRegisterElfGlobals =
2295 IntptrTy, IntptrTy, IntptrTy);
2296 AsanUnregisterElfGlobals =
2298 IntptrTy, IntptrTy, IntptrTy);
2303void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2308 if (!
G->hasName()) {
2312 G->setName(
genName(
"anon_global"));
2315 if (!InternalSuffix.
empty() &&
G->hasLocalLinkage()) {
2316 std::string
Name = std::string(
G->getName());
2317 Name += InternalSuffix;
2318 C =
M.getOrInsertComdat(Name);
2320 C =
M.getOrInsertComdat(
G->getName());
2326 if (TargetTriple.isOSBinFormatCOFF()) {
2328 if (
G->hasPrivateLinkage())
2341ModuleAddressSanitizer::CreateMetadataGlobal(
Constant *Initializer,
2343 auto Linkage = TargetTriple.isOSBinFormatMachO()
2349 Metadata->setSection(getGlobalMetadataSection());
2356Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2360 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2368void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2372 auto &
DL =
M.getDataLayout();
2375 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2376 Constant *Initializer = MetadataInitializers[i];
2380 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2386 unsigned SizeOfGlobalStruct =
DL.getTypeAllocSize(Initializer->
getType());
2388 "global metadata will not be padded appropriately");
2391 SetComdatForGlobalMetadata(
G,
Metadata,
"");
2396 if (!MetadataGlobals.empty())
2400void ModuleAddressSanitizer::instrumentGlobalsELF(
2403 const std::string &UniqueModuleId) {
2410 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2413 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2416 CreateMetadataGlobal(MetadataInitializers[i],
G->getName());
2418 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2421 if (UseComdatForGlobalsGC)
2422 SetComdatForGlobalMetadata(
G,
Metadata, UniqueModuleId);
2427 if (!MetadataGlobals.empty())
2444 "__start_" + getGlobalMetadataSection());
2448 "__stop_" + getGlobalMetadataSection());
2462 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2469void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2480 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2481 Constant *Initializer = MetadataInitializers[i];
2487 auto LivenessBinder =
2492 Twine(
"__asan_binder_") +
G->getName());
2493 Liveness->
setSection(
"__DATA,__asan_liveness,regular,live_support");
2494 LivenessGlobals[i] = Liveness;
2501 if (!LivenessGlobals.empty())
2523 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2528void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2532 unsigned N = ExtendedGlobals.
size();
2542 if (Mapping.Scale > 3)
2543 AllGlobals->setAlignment(
Align(1ULL << Mapping.Scale));
2548 ConstantInt::get(IntptrTy,
N)});
2554 IrbDtor.CreateCall(AsanUnregisterGlobals,
2556 ConstantInt::get(IntptrTy,
N)});
2565void ModuleAddressSanitizer::instrumentGlobals(
IRBuilder<> &IRB,
2570 if (CompileKernel) {
2571 for (
auto &GA :
M.aliases()) {
2573 AliasedGlobalExclusions.
insert(GV);
2578 for (
auto &
G :
M.globals()) {
2579 if (!AliasedGlobalExclusions.
count(&
G) && shouldInstrumentGlobal(&
G))
2583 size_t n = GlobalsToChange.
size();
2584 auto &
DL =
M.getDataLayout();
2598 IntptrTy, IntptrTy, IntptrTy);
2602 for (
size_t i = 0; i < n; i++) {
2606 if (
G->hasSanitizerMetadata())
2607 MD =
G->getSanitizerMetadata();
2612 std::string NameForGlobal =
G->getName().str();
2617 Type *Ty =
G->getValueType();
2618 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
2631 M, NewTy,
G->isConstant(),
Linkage, NewInitializer,
"",
G,
2632 G->getThreadLocalMode(),
G->getAddressSpace());
2642 if (TargetTriple.isOSBinFormatMachO() && !
G->hasSection() &&
2645 if (Seq && Seq->isCString())
2646 NewGlobal->
setSection(
"__TEXT,__asan_cstring,regular");
2660 G->eraseFromParent();
2661 NewGlobals[i] = NewGlobal;
2666 bool CanUsePrivateAliases =
2667 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2668 TargetTriple.isOSBinFormatWasm();
2669 if (CanUsePrivateAliases && UsePrivateAlias) {
2672 InstrumentedGlobal =
2678 ODRIndicator = ConstantInt::get(IntptrTy, -1);
2679 }
else if (UseOdrIndicator) {
2682 auto *ODRIndicatorSym =
2691 ODRIndicatorSym->setAlignment(
Align(1));
2698 ConstantInt::get(IntptrTy, SizeInBytes),
2699 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2702 ConstantInt::get(IntptrTy, MD.
IsDynInit),
2707 Initializers[i] = Initializer;
2713 for (
size_t i = 0; i < n; i++) {
2715 if (
G->getName().empty())
continue;
2720 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2727 }
else if (n == 0) {
2730 *CtorComdat = TargetTriple.isOSBinFormatELF();
2732 *CtorComdat =
false;
2733 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2734 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2735 }
else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2736 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2738 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2744 createInitializerPoisonCalls();
2750ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes)
const {
2751 constexpr uint64_t kMaxRZ = 1 << 18;
2752 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2755 if (SizeInBytes <= MinRZ / 2) {
2759 RZ = MinRZ - SizeInBytes;
2762 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2765 if (SizeInBytes % MinRZ)
2766 RZ += MinRZ - (SizeInBytes % MinRZ);
2769 assert((RZ + SizeInBytes) % MinRZ == 0);
2774int ModuleAddressSanitizer::GetAsanVersion()
const {
2775 int LongSize =
M.getDataLayout().getPointerSizeInBits();
2776 bool isAndroid =
M.getTargetTriple().isAndroid();
2780 Version += (LongSize == 32 && isAndroid);
2795bool ModuleAddressSanitizer::instrumentModule() {
2796 initializeCallbacks();
2804 if (CompileKernel) {
2809 std::string AsanVersion = std::to_string(GetAsanVersion());
2810 std::string VersionCheckName =
2812 std::tie(AsanCtorFunction, std::ignore) =
2815 {}, VersionCheckName);
2819 bool CtorComdat =
true;
2822 if (AsanCtorFunction) {
2823 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2824 instrumentGlobals(IRB, &CtorComdat);
2827 instrumentGlobals(IRB, &CtorComdat);
2836 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2837 if (AsanCtorFunction) {
2841 if (AsanDtorFunction) {
2846 if (AsanCtorFunction)
2848 if (AsanDtorFunction)
2859 for (
int Exp = 0;
Exp < 2;
Exp++) {
2860 for (
size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2861 const std::string TypeStr = AccessIsWrite ?
"store" :
"load";
2862 const std::string ExpStr =
Exp ?
"exp_" :
"";
2863 const std::string EndingStr = Recover ?
"_noabort" :
"";
2873 if (
auto AK = TLI->getExtAttrForI32Param(
false)) {
2874 AL2 = AL2.addParamAttribute(*
C, 2, AK);
2875 AL1 = AL1.addParamAttribute(*
C, 1, AK);
2878 AsanErrorCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2882 AsanMemoryAccessCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2887 AccessSizeIndex++) {
2888 const std::string Suffix = TypeStr +
itostr(1ULL << AccessSizeIndex);
2889 AsanErrorCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2890 M.getOrInsertFunction(
2894 AsanMemoryAccessCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2895 M.getOrInsertFunction(
2902 const std::string MemIntrinCallbackPrefix =
2906 AsanMemmove =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memmove",
2907 PtrTy, PtrTy, PtrTy, IntptrTy);
2908 AsanMemcpy =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memcpy", PtrTy,
2909 PtrTy, PtrTy, IntptrTy);
2910 AsanMemset =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memset",
2914 AsanHandleNoReturnFunc =
2917 AsanPtrCmpFunction =
2919 AsanPtrSubFunction =
2921 if (Mapping.InGlobal)
2922 AsanShadowGlobal =
M.getOrInsertGlobal(
"__asan_shadow",
2925 AMDGPUAddressShared =
2927 AMDGPUAddressPrivate =
2931bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(
Function &
F) {
2939 if (
F.getName().contains(
" load]")) {
2949bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(
Function &
F) {
2955 if (Mapping.InGlobal) {
2963 LocalDynamicShadow =
2964 IRB.
CreateCall(Asm, {AsanShadowGlobal},
".asan.shadow");
2966 LocalDynamicShadow =
2970 Value *GlobalDynamicAddress =
F.getParent()->getOrInsertGlobal(
2972 LocalDynamicShadow = IRB.
CreateLoad(IntptrTy, GlobalDynamicAddress);
2977void AddressSanitizer::markEscapedLocalAllocas(
Function &
F) {
2982 assert(ProcessedAllocas.empty() &&
"must process localescape before allocas");
2986 if (!
F.getParent()->getFunction(
"llvm.localescape"))
return;
2992 if (
II &&
II->getIntrinsicID() == Intrinsic::localescape) {
2994 for (
Value *Arg :
II->args()) {
2997 "non-static alloca arg to localescape");
2998 ProcessedAllocas[AI] =
false;
3005bool AddressSanitizer::suppressInstrumentationSiteForDebug(
int &Instrumented) {
3006 bool ShouldInstrument =
3010 return !ShouldInstrument;
3013bool AddressSanitizer::instrumentFunction(
Function &
F,
3016 bool FunctionModified =
false;
3019 if (
F.hasFnAttribute(Attribute::Naked))
3020 return FunctionModified;
3025 if (maybeInsertAsanInitAtFunctionEntry(
F))
3026 FunctionModified =
true;
3029 if (!
F.hasFnAttribute(Attribute::SanitizeAddress))
return FunctionModified;
3031 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3032 return FunctionModified;
3036 initializeCallbacks(TLI);
3038 FunctionStateRAII CleanupObj(
this);
3040 RuntimeCallInserter RTCI(
F);
3042 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(
F);
3046 markEscapedLocalAllocas(
F);
3058 for (
auto &BB :
F) {
3060 TempsToInstrument.
clear();
3061 int NumInsnsPerBB = 0;
3062 for (
auto &Inst : BB) {
3063 if (LooksLikeCodeInBug11395(&Inst))
return false;
3070 if (!InterestingOperands.
empty()) {
3071 for (
auto &Operand : InterestingOperands) {
3077 if (Operand.MaybeMask) {
3081 if (!TempsToInstrument.
insert(
Ptr).second)
3085 OperandsToInstrument.
push_back(Operand);
3092 PointerComparisonsOrSubtracts.
push_back(&Inst);
3100 TempsToInstrument.
clear();
3111 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3112 OperandsToInstrument.
size() + IntrinToInstrument.
size() >
3113 (
unsigned)InstrumentationWithCallsThreshold);
3118 int NumInstrumented = 0;
3119 for (
auto &Operand : OperandsToInstrument) {
3120 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3121 instrumentMop(ObjSizeVis, Operand, UseCalls,
3122 F.getDataLayout(), RTCI);
3123 FunctionModified =
true;
3125 for (
auto *Inst : IntrinToInstrument) {
3126 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3127 instrumentMemIntrinsic(Inst, RTCI);
3128 FunctionModified =
true;
3131 FunctionStackPoisoner FSP(
F, *
this, RTCI);
3132 bool ChangedStack = FSP.runOnFunction();
3136 for (
auto *CI : NoReturnCalls) {
3138 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3141 for (
auto *Inst : PointerComparisonsOrSubtracts) {
3142 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3143 FunctionModified =
true;
3146 if (ChangedStack || !NoReturnCalls.empty())
3147 FunctionModified =
true;
3149 LLVM_DEBUG(
dbgs() <<
"ASAN done instrumenting: " << FunctionModified <<
" "
3152 return FunctionModified;
3158bool AddressSanitizer::LooksLikeCodeInBug11395(
Instruction *
I) {
3159 if (LongSize != 32)
return false;
3168void FunctionStackPoisoner::initializeCallbacks(
Module &M) {
3172 const char *MallocNameTemplate =
3177 std::string Suffix =
itostr(Index);
3178 AsanStackMallocFunc[
Index] =
M.getOrInsertFunction(
3179 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3180 AsanStackFreeFunc[
Index] =
3185 if (ASan.UseAfterScope) {
3186 AsanPoisonStackMemoryFunc =
M.getOrInsertFunction(
3188 AsanUnpoisonStackMemoryFunc =
M.getOrInsertFunction(
3192 for (
size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3193 0xf3, 0xf5, 0xf8}) {
3194 std::ostringstream
Name;
3196 Name << std::setw(2) << std::setfill(
'0') << std::hex << Val;
3197 AsanSetShadowFunc[Val] =
3198 M.getOrInsertFunction(
Name.str(), IRB.
getVoidTy(), IntptrTy, IntptrTy);
3201 AsanAllocaPoisonFunc =
M.getOrInsertFunction(
3203 AsanAllocasUnpoisonFunc =
M.getOrInsertFunction(
3209 size_t Begin,
size_t End,
3211 Value *ShadowBase) {
3215 const size_t LargestStoreSizeInBytes =
3216 std::min<size_t>(
sizeof(uint64_t), ASan.LongSize / 8);
3218 const bool IsLittleEndian =
F.getDataLayout().isLittleEndian();
3224 for (
size_t i = Begin; i < End;) {
3225 if (!ShadowMask[i]) {
3231 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3233 while (StoreSizeInBytes > End - i)
3234 StoreSizeInBytes /= 2;
3237 for (
size_t j = StoreSizeInBytes - 1;
j && !ShadowMask[i +
j]; --
j) {
3238 while (j <= StoreSizeInBytes / 2)
3239 StoreSizeInBytes /= 2;
3243 for (
size_t j = 0;
j < StoreSizeInBytes;
j++) {
3245 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3247 Val = (Val << 8) | ShadowBytes[i + j];
3256 i += StoreSizeInBytes;
3263 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.
size(), IRB, ShadowBase);
3268 size_t Begin,
size_t End,
3271 size_t Done = Begin;
3272 for (
size_t i = Begin, j = Begin + 1; i < End; i =
j++) {
3273 if (!ShadowMask[i]) {
3277 uint8_t Val = ShadowBytes[i];
3278 if (!AsanSetShadowFunc[Val])
3282 for (;
j < End && ShadowMask[
j] && Val == ShadowBytes[
j]; ++
j) {
3285 if (j - i >= ASan.MaxInlinePoisoningSize) {
3286 copyToShadowInline(ShadowMask, ShadowBytes,
Done, i, IRB, ShadowBase);
3287 RTCI.createRuntimeCall(
3288 IRB, AsanSetShadowFunc[Val],
3289 {IRB.
CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3290 ConstantInt::get(IntptrTy, j - i)});
3295 copyToShadowInline(ShadowMask, ShadowBytes,
Done, End, IRB, ShadowBase);
3303 for (
int i = 0;; i++, MaxSize *= 2)
3304 if (LocalStackSize <= MaxSize)
return i;
3308void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3310 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3318 if (Arg.hasByValAttr()) {
3319 Type *Ty = Arg.getParamByValType();
3320 const Align Alignment =
3321 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3325 (Arg.hasName() ? Arg.getName() :
"Arg" +
Twine(Arg.getArgNo())) +
3328 Arg.replaceAllUsesWith(AI);
3330 uint64_t AllocSize =
DL.getTypeAllocSize(Ty);
3331 IRB.
CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3339 Value *ValueIfFalse) {
3342 PHI->addIncoming(ValueIfFalse, CondBlock);
3344 PHI->addIncoming(ValueIfTrue, ThenBlock);
3348Value *FunctionStackPoisoner::createAllocaForLayout(
3357 nullptr,
"MyAlloca");
3361 uint64_t FrameAlignment = std::max(
L.FrameAlignment, uint64_t(
ClRealignStack));
3366void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3369 DynamicAllocaLayout = IRB.
CreateAlloca(IntptrTy,
nullptr);
3374void FunctionStackPoisoner::processDynamicAllocas() {
3381 for (
const auto &APC : DynamicAllocaPoisonCallVec) {
3384 assert(ASan.isInterestingAlloca(*APC.AI));
3385 assert(!APC.AI->isStaticAlloca());
3388 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3395 createDynamicAllocasInitStorage();
3396 for (
auto &AI : DynamicAllocaVec)
3397 handleDynamicAllocaCall(AI);
3398 unpoisonDynamicAllocas();
3410 for (
Instruction *It = Start; It; It = It->getNextNode()) {
3427 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3430 Value *Val = Store->getValueOperand();
3432 bool IsArgInitViaCast =
3437 Val == It->getPrevNode();
3438 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3442 if (IsArgInitViaCast)
3457 if (AI->
hasMetadata(LLVMContext::MD_annotation)) {
3460 for (
auto &Annotation : AllocaAnnotations->
operands()) {
3464 for (
unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3467 auto MetadataString =
3469 if (MetadataString->getString() ==
"alloca_name_altered")
3478void FunctionStackPoisoner::processStaticAllocas() {
3479 if (AllocaVec.
empty()) {
3484 int StackMallocIdx = -1;
3486 if (
auto SP =
F.getSubprogram())
3487 EntryDebugLocation =
3496 auto InsBeforeB = InsBefore->
getParent();
3497 assert(InsBeforeB == &
F.getEntryBlock());
3498 for (
auto *AI : StaticAllocasToMoveUp)
3509 ArgInitInst->moveBefore(InsBefore->
getIterator());
3512 if (LocalEscapeCall)
3520 ASan.getAllocaSizeInBytes(*AI),
3531 uint64_t Granularity = 1ULL << Mapping.Scale;
3532 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3538 for (
auto &
Desc : SVD)
3542 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3545 assert(ASan.isInterestingAlloca(*APC.AI));
3546 assert(APC.AI->isStaticAlloca());
3551 if (
const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3552 if (LifetimeLoc->getFile() == FnLoc->getFile())
3553 if (
unsigned Line = LifetimeLoc->getLine())
3554 Desc.Line = std::min(
Desc.Line ?
Desc.Line : Line, Line);
3560 LLVM_DEBUG(
dbgs() << DescriptionString <<
" --- " <<
L.FrameSize <<
"\n");
3561 uint64_t LocalStackSize =
L.FrameSize;
3562 bool DoStackMalloc =
3572 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3573 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3575 Value *StaticAlloca =
3576 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L,
false);
3579 Value *LocalStackBase;
3580 Value *LocalStackBaseAlloca;
3583 if (DoStackMalloc) {
3584 LocalStackBaseAlloca =
3585 IRB.
CreateAlloca(IntptrTy,
nullptr,
"asan_local_stack_base");
3592 Constant *OptionDetectUseAfterReturn =
F.getParent()->getOrInsertGlobal(
3602 Value *FakeStackValue =
3603 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3604 ConstantInt::get(IntptrTy, LocalStackSize));
3606 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3607 ConstantInt::get(IntptrTy, 0));
3615 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3616 ConstantInt::get(IntptrTy, LocalStackSize));
3618 Value *NoFakeStack =
3623 Value *AllocaValue =
3624 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L,
true) : StaticAlloca;
3627 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3628 IRB.
CreateStore(LocalStackBase, LocalStackBaseAlloca);
3633 FakeStack = ConstantInt::get(IntptrTy, 0);
3635 DoDynamicAlloca ? createAllocaForLayout(IRB, L,
true) : StaticAlloca;
3636 LocalStackBaseAlloca = LocalStackBase;
3642 Value *LocalStackBaseAllocaPtr =
3645 : LocalStackBaseAlloca;
3647 "Variable descriptions relative to ASan stack base will be dropped");
3651 for (
const auto &
Desc : SVD) {
3656 IRB.
CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
Desc.Offset)),
3670 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3680 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3687 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3690 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3692 if (!StaticAllocaPoisonCallVec.empty()) {
3696 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3699 size_t Begin =
Desc.Offset /
L.Granularity;
3700 size_t End = Begin + (APC.Size +
L.Granularity - 1) /
L.Granularity;
3703 copyToShadow(ShadowAfterScope,
3704 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3710 for (
Value *NewAllocaPtr : NewAllocaPtrs) {
3713 if (
I->isLifetimeStartOrEnd())
3714 I->eraseFromParent();
3727 if (DoStackMalloc) {
3728 assert(StackMallocIdx >= 0);
3745 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3747 ShadowAfterReturn.
resize(ClassSize /
L.Granularity,
3749 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3751 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3753 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3754 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3755 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3756 IRBPoison.CreateStore(
3758 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3761 RTCI.createRuntimeCall(
3762 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3763 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3767 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3769 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3774 for (
auto *AI : AllocaVec)
3778void FunctionStackPoisoner::poisonAlloca(
Value *V, uint64_t
Size,
3782 Value *SizeArg = ConstantInt::get(IntptrTy,
Size);
3783 RTCI.createRuntimeCall(
3784 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3785 {AddrArg, SizeArg});
3796void FunctionStackPoisoner::handleDynamicAllocaCall(
AllocaInst *AI) {
3804 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3810 const unsigned ElementSize =
3814 ConstantInt::get(IntptrTy, ElementSize));
3842 ConstantInt::get(IntptrTy, Alignment.
value()));
3845 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3856 if (
I->isLifetimeStartOrEnd())
3857 I->eraseFromParent();
3889 Size - uint64_t(
Offset) >= TypeStoreSize / 8;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
print mir2vec MIR2Vec Vocabulary Printer Pass
Machine Check Debug Module
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
@ SameSize
The data referenced by the COMDAT must be the same size.
@ Any
The linker may choose any COMDAT.
@ NoDeduplicate
No deduplication is performed.
@ ExactMatch
The data referenced by the COMDAT must be the same.
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
bool hasPersonalityFn() const
Check whether this function has a personality function.
const Constant * getAliasee() const
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ CommonLinkage
Tentative definitions.
@ InternalLinkage
Rename collisions when linking (static functions).
@ AvailableExternallyLinkage
Available for inspection, not emission.
@ ExternalWeakLinkage
ExternalWeak linkage description.
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Base class for instruction visitors.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
ArrayRef< MDOperand > operands() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This is the common base class for memset/memcpy/memmove.
A Module instance is used to store all the information related to an LLVM module.
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
Triple - Helper class for working with autoconf configuration names.
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
bool isDriverKit() const
Is this an Apple DriverKit triple.
bool isAndroid() const
Tests whether the target is Android.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isOSWindows() const
Tests whether the OS is Windows.
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isOSLinux() const
Tests whether the OS is Linux.
bool isMacOSX() const
Is this a Mac OS X triple.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
bool isWatchOS() const
Is this an Apple watchOS triple.
bool isiOS() const
Is this an iOS triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isOSHaiku() const
Tests whether the OS is Haiku.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
FunctionAddr VTableAddr Value
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
FunctionAddr VTableAddr uintptr_t uintptr_t Version
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classifie...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
DWARFExpression::Operation Op
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
const uint8_t AccessSizeIndex
LLVM_ABI ASanAccessInfo(int32_t Packed)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.