94#define DEBUG_TYPE "asan"
100 std::numeric_limits<uint64_t>::max();
141 "__asan_unregister_image_globals";
154 "__asan_stack_malloc_always_";
168 "__asan_option_detect_stack_use_after_return";
171 "__asan_shadow_memory_dynamic_address";
197 "asan-kernel",
cl::desc(
"Enable KernelAddressSanitizer instrumentation"),
202 cl::desc(
"Enable recovery mode (continue-after-error)."),
206 "asan-guard-against-version-mismatch",
212 cl::desc(
"instrument read instructions"),
216 "asan-instrument-writes",
cl::desc(
"instrument write instructions"),
225 "asan-instrument-atomics",
235 "asan-always-slow-path",
240 "asan-force-dynamic-shadow",
241 cl::desc(
"Load shadow address into a local variable for each function"),
246 cl::desc(
"Access dynamic shadow through an ifunc global on "
247 "platforms that support this"),
251 "asan-with-ifunc-suppress-remat",
252 cl::desc(
"Suppress rematerialization of dynamic shadow address by passing "
253 "it through inline asm in prologue."),
261 "asan-max-ins-per-bb",
cl::init(10000),
262 cl::desc(
"maximal number of instructions to instrument in any given BB"),
269 "asan-max-inline-poisoning-size",
271 "Inline shadow poisoning for blocks up to the given size in bytes."),
275 "asan-use-after-return",
276 cl::desc(
"Sets the mode of detection for stack-use-after-return."),
278 clEnumValN(AsanDetectStackUseAfterReturnMode::Never,
"never",
279 "Never detect stack use after return."),
281 AsanDetectStackUseAfterReturnMode::Runtime,
"runtime",
282 "Detect stack use after return if "
283 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284 clEnumValN(AsanDetectStackUseAfterReturnMode::Always,
"always",
285 "Always detect stack use after return.")),
289 cl::desc(
"Create redzones for byval "
290 "arguments (extra copy "
295 cl::desc(
"Check stack-use-after-scope"),
304 cl::desc(
"Handle C++ initializer order"),
308 "asan-detect-invalid-pointer-pair",
313 "asan-detect-invalid-pointer-cmp",
318 "asan-detect-invalid-pointer-sub",
323 "asan-realign-stack",
324 cl::desc(
"Realign stack to the value of this flag (power of two)"),
328 "asan-instrumentation-with-call-threshold",
329 cl::desc(
"If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
335 "asan-memory-access-callback-prefix",
340 "asan-kernel-mem-intrinsic-prefix",
346 cl::desc(
"instrument dynamic allocas"),
350 "asan-skip-promotable-allocas",
355 "asan-constructor-kind",
356 cl::desc(
"Sets the ASan constructor kind"),
359 "Use global constructors")),
366 cl::desc(
"scale of asan shadow mapping"),
371 cl::desc(
"offset of asan shadow mapping [EXPERIMENTAL]"),
385 "asan-opt-same-temp",
cl::desc(
"Instrument the same temp just once"),
389 cl::desc(
"Don't instrument scalar globals"),
393 "asan-opt-stack",
cl::desc(
"Don't instrument scalar stack variables"),
397 "asan-stack-dynamic-alloca",
402 "asan-force-experiment",
408 cl::desc(
"Use private aliases for global variables"),
413 cl::desc(
"Use odr indicators to improve ODR reporting"),
418 cl::desc(
"Use linker features to support dead "
419 "code stripping of globals"),
426 cl::desc(
"Place ASan constructors in comdat sections"),
430 "asan-destructor-kind",
431 cl::desc(
"Sets the ASan destructor kind. The default is to use the value "
432 "provided to the pass constructor"),
435 "Use global destructors")),
455STATISTIC(NumInstrumentedReads,
"Number of instrumented reads");
456STATISTIC(NumInstrumentedWrites,
"Number of instrumented writes");
458 "Number of optimized accesses to global vars");
460 "Number of optimized accesses to stack vars");
469struct ShadowMapping {
480 bool IsAndroid = TargetTriple.
isAndroid();
483 bool IsMacOS = TargetTriple.
isMacOSX();
486 bool IsPS = TargetTriple.
isPS();
492 bool IsMIPSN32ABI = TargetTriple.
isABIN32();
493 bool IsMIPS32 = TargetTriple.
isMIPS32();
494 bool IsMIPS64 = TargetTriple.
isMIPS64();
495 bool IsArmOrThumb = TargetTriple.
isARM() || TargetTriple.
isThumb();
502 bool IsAMDGPU = TargetTriple.
isAMDGPU();
504 bool IsWasm = TargetTriple.
isWasm();
506 ShadowMapping Mapping;
513 if (LongSize == 32) {
516 else if (IsMIPSN32ABI)
541 else if (IsFreeBSD && IsAArch64)
543 else if (IsFreeBSD && !IsMIPS64) {
548 }
else if (IsNetBSD) {
555 else if (IsLinux && IsX86_64) {
561 }
else if (IsWindows && IsX86_64) {
567 else if (IsMacOS && IsAArch64)
571 else if (IsLoongArch64)
578 else if (IsHaiku && IsX86_64)
598 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
599 !IsRISCV64 && !IsLoongArch64 &&
600 !(Mapping.Offset & (Mapping.Offset - 1)) &&
602 bool IsAndroidWithIfuncSupport =
604 Mapping.InGlobal =
ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
612 int *MappingScale,
bool *OrShadowOffset) {
614 *ShadowBase = Mapping.Offset;
615 *MappingScale = Mapping.Scale;
616 *OrShadowOffset = Mapping.OrShadowOffset;
634 bool Changed =
false;
635 if (!
F.doesNotAccessMemory()) {
636 bool WritesMemory = !
F.onlyReadsMemory();
637 bool ReadsMemory = !
F.onlyWritesMemory();
638 if ((WritesMemory && !ReadsMemory) ||
F.onlyAccessesArgMemory()) {
639 F.removeFnAttr(Attribute::Memory);
645 if (
A.hasAttribute(Attribute::WriteOnly)) {
646 A.removeAttr(Attribute::WriteOnly);
654 F.addFnAttr(Attribute::NoBuiltin);
669 AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
670 CompileKernel(CompileKernel) {}
677 return std::max(32U, 1U << MappingScale);
696class RuntimeCallInserter {
698 bool TrackInsertedCalls =
false;
702 RuntimeCallInserter(
Function &Fn) : OwnerFn(&Fn) {
706 TrackInsertedCalls =
true;
710 ~RuntimeCallInserter() {
711 if (InsertedCalls.
empty())
713 assert(TrackInsertedCalls &&
"Calls were wrongly tracked");
716 for (
CallInst *CI : InsertedCalls) {
718 assert(BB &&
"Instruction doesn't belong to a BasicBlock");
720 "Instruction doesn't belong to the expected Function!");
728 if (Colors.
size() != 1) {
730 "Instruction's BasicBlock is not monochromatic");
737 if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
741 OB, CI->getIterator());
742 NewCall->copyMetadata(*CI);
743 CI->replaceAllUsesWith(NewCall);
744 CI->eraseFromParent();
755 if (TrackInsertedCalls)
756 InsertedCalls.push_back(Inst);
762struct AddressSanitizer {
764 int InstrumentationWithCallsThreshold,
765 uint32_t MaxInlinePoisoningSize,
bool CompileKernel =
false,
766 bool Recover =
false,
bool UseAfterScope =
false,
768 AsanDetectStackUseAfterReturnMode::Runtime)
777 InstrumentationWithCallsThreshold(
780 : InstrumentationWithCallsThreshold),
783 : MaxInlinePoisoningSize) {
784 C = &(
M.getContext());
785 DL = &
M.getDataLayout();
786 LongSize =
M.getDataLayout().getPointerSizeInBits();
788 PtrTy = PointerType::getUnqual(*C);
790 TargetTriple =
M.getTargetTriple();
794 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
802 bool isInterestingAlloca(
const AllocaInst &AI);
810 const DataLayout &DL, RuntimeCallInserter &RTCI);
811 void instrumentPointerComparisonOrSubtraction(
Instruction *
I,
812 RuntimeCallInserter &RTCI);
815 uint32_t TypeStoreSize,
bool IsWrite,
817 RuntimeCallInserter &RTCI);
820 uint32_t TypeStoreSize,
bool IsWrite,
821 Value *SizeArgument);
826 TypeSize TypeStoreSize,
bool IsWrite,
827 Value *SizeArgument,
bool UseCalls,
829 RuntimeCallInserter &RTCI);
830 void instrumentMaskedLoadOrStore(AddressSanitizer *
Pass,
const DataLayout &DL,
834 Type *OpType,
bool IsWrite,
835 Value *SizeArgument,
bool UseCalls,
836 uint32_t Exp, RuntimeCallInserter &RTCI);
840 bool IsWrite,
size_t AccessSizeIndex,
842 RuntimeCallInserter &RTCI);
843 void instrumentMemIntrinsic(
MemIntrinsic *
MI, RuntimeCallInserter &RTCI);
845 bool suppressInstrumentationSiteForDebug(
int &Instrumented);
847 bool maybeInsertAsanInitAtFunctionEntry(
Function &
F);
848 bool maybeInsertDynamicShadowAtFunctionEntry(
Function &
F);
849 void markEscapedLocalAllocas(
Function &
F);
852 friend struct FunctionStackPoisoner;
862 struct FunctionStateRAII {
863 AddressSanitizer *
Pass;
865 FunctionStateRAII(AddressSanitizer *
Pass) :
Pass(
Pass) {
867 "last pass forgot to clear cache");
871 ~FunctionStateRAII() {
872 Pass->LocalDynamicShadow =
nullptr;
873 Pass->ProcessedAllocas.clear();
889 ShadowMapping Mapping;
903 Value *LocalDynamicShadow =
nullptr;
909 int InstrumentationWithCallsThreshold;
913class ModuleAddressSanitizer {
915 ModuleAddressSanitizer(
Module &M,
bool InsertVersionCheck,
916 bool CompileKernel =
false,
bool Recover =
false,
917 bool UseGlobalsGC =
true,
bool UseOdrIndicator =
true,
925 : InsertVersionCheck),
927 UseGlobalsGC(UseGlobalsGC &&
ClUseGlobalsGC && !this->CompileKernel),
942 UseCtorComdat(UseGlobalsGC &&
ClWithComdat && !this->CompileKernel),
943 DestructorKind(DestructorKind),
947 C = &(
M.getContext());
948 int LongSize =
M.getDataLayout().getPointerSizeInBits();
950 PtrTy = PointerType::getUnqual(*C);
951 TargetTriple =
M.getTargetTriple();
956 assert(this->DestructorKind != AsanDtorKind::Invalid);
959 bool instrumentModule();
962 void initializeCallbacks();
964 void instrumentGlobals(
IRBuilder<> &IRB,
bool *CtorComdat);
971 const std::string &UniqueModuleId);
976 InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB,
988 bool ShouldUseMachOGlobalsSection()
const;
989 StringRef getGlobalMetadataSection()
const;
990 void poisonOneInitializer(
Function &GlobalInit);
991 void createInitializerPoisonCalls();
992 uint64_t getMinRedzoneSizeForGlobal()
const {
996 int GetAsanVersion()
const;
1001 bool InsertVersionCheck;
1004 bool UsePrivateAlias;
1005 bool UseOdrIndicator;
1013 ShadowMapping Mapping;
1023 Function *AsanCtorFunction =
nullptr;
1024 Function *AsanDtorFunction =
nullptr;
1037struct FunctionStackPoisoner :
public InstVisitor<FunctionStackPoisoner> {
1039 AddressSanitizer &ASan;
1040 RuntimeCallInserter &RTCI;
1045 ShadowMapping Mapping;
1054 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1058 struct AllocaPoisonCall {
1072 bool HasInlineAsm =
false;
1073 bool HasReturnsTwiceCall =
false;
1076 FunctionStackPoisoner(
Function &F, AddressSanitizer &ASan,
1077 RuntimeCallInserter &RTCI)
1078 :
F(
F), ASan(ASan), RTCI(RTCI),
1080 IntptrTy(ASan.IntptrTy),
1082 Mapping(ASan.Mapping),
1090 copyArgsPassedByValToAllocas();
1095 if (AllocaVec.
empty() && DynamicAllocaVec.
empty())
return false;
1097 initializeCallbacks(*
F.getParent());
1099 processDynamicAllocas();
1100 processStaticAllocas();
1111 void copyArgsPassedByValToAllocas();
1116 void processStaticAllocas();
1117 void processDynamicAllocas();
1119 void createDynamicAllocasInitStorage();
1137 void unpoisonDynamicAllocasBeforeInst(
Instruction *InstBefore,
1138 Value *SavedStack) {
1145 if (!isa<ReturnInst>(InstBefore)) {
1147 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1153 RTCI.createRuntimeCall(
1154 IRB, AsanAllocasUnpoisonFunc,
1155 {IRB.
CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1159 void unpoisonDynamicAllocas() {
1161 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1163 for (
Instruction *StackRestoreInst : StackRestoreVec)
1164 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1165 StackRestoreInst->getOperand(0));
1178 void handleDynamicAllocaCall(
AllocaInst *AI);
1184 const auto *STy = dyn_cast<StructType>(AllocaType);
1185 if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1186 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1190 if (AllocaVec.
empty())
1208 if (
ID == Intrinsic::stackrestore) StackRestoreVec.
push_back(&
II);
1209 if (
ID == Intrinsic::localescape) LocalEscapeCall = &
II;
1210 if (!ASan.UseAfterScope)
1212 if (!
II.isLifetimeStartOrEnd())
1215 AllocaInst *AI = dyn_cast<AllocaInst>(
II.getArgOperand(0));
1217 if (!AI || !ASan.isInterestingAlloca(*AI))
1227 bool DoPoison = (
ID == Intrinsic::lifetime_end);
1228 AllocaPoisonCall APC = {&
II, AI, *
Size, DoPoison};
1230 StaticAllocaPoisonCallVec.
push_back(APC);
1232 DynamicAllocaPoisonCallVec.
push_back(APC);
1236 if (
CallInst *CI = dyn_cast<CallInst>(&CB)) {
1237 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1238 HasReturnsTwiceCall |= CI->canReturnTwice();
1243 void initializeCallbacks(
Module &M);
1270 OS, MapClassName2PassName);
1275 OS <<
"use-after-scope";
1284 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1285 ConstructorKind(ConstructorKind) {}
1294 ModuleAddressSanitizer ModuleSanitizer(
1296 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1308 if (
F.getName().starts_with(
"__asan_"))
1310 if (
F.isPresplitCoroutine())
1312 AddressSanitizer FunctionSanitizer(
1317 Modified |= FunctionSanitizer.instrumentFunction(
F, &TLI);
1319 Modified |= ModuleSanitizer.instrumentModule();
1340 if (
G->getName().starts_with(
"llvm.") ||
1342 G->getName().starts_with(
"__llvm_gcov_ctr") ||
1344 G->getName().starts_with(
"__llvm_rtti_proxy"))
1357 Type *PtrTy = cast<PointerType>(
Addr->getType()->getScalarType());
1359 if (AddrSpace == 3 || AddrSpace == 5)
1366 Shadow = IRB.
CreateLShr(Shadow, Mapping.Scale);
1367 if (Mapping.Offset == 0)
return Shadow;
1370 if (LocalDynamicShadow)
1371 ShadowBase = LocalDynamicShadow;
1373 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1374 if (Mapping.OrShadowOffset)
1375 return IRB.
CreateOr(Shadow, ShadowBase);
1377 return IRB.
CreateAdd(Shadow, ShadowBase);
1382 RuntimeCallInserter &RTCI) {
1384 if (isa<MemTransferInst>(
MI)) {
1385 RTCI.createRuntimeCall(
1386 IRB, isa<MemMoveInst>(
MI) ? AsanMemmove : AsanMemcpy,
1390 }
else if (isa<MemSetInst>(
MI)) {
1391 RTCI.createRuntimeCall(
1397 MI->eraseFromParent();
1401bool AddressSanitizer::isInterestingAlloca(
const AllocaInst &AI) {
1402 auto [It,
Inserted] = ProcessedAllocas.try_emplace(&AI);
1405 return It->getSecond();
1407 bool IsInteresting =
1420 !(SSGI && SSGI->
isSafe(AI)));
1422 It->second = IsInteresting;
1423 return IsInteresting;
1428 Type *PtrTy = cast<PointerType>(
Ptr->getType()->getScalarType());
1437 if (
Ptr->isSwiftError())
1443 if (
auto AI = dyn_cast_or_null<AllocaInst>(
Ptr))
1454void AddressSanitizer::getInterestingMemoryOperands(
1457 if (LocalDynamicShadow ==
I)
1460 if (
LoadInst *LI = dyn_cast<LoadInst>(
I)) {
1463 Interesting.
emplace_back(
I, LI->getPointerOperandIndex(),
false,
1464 LI->getType(), LI->getAlign());
1465 }
else if (
StoreInst *SI = dyn_cast<StoreInst>(
I)) {
1469 SI->getValueOperand()->getType(),
SI->getAlign());
1473 Interesting.
emplace_back(
I, RMW->getPointerOperandIndex(),
true,
1474 RMW->getValOperand()->getType(), std::nullopt);
1478 Interesting.
emplace_back(
I, XCHG->getPointerOperandIndex(),
true,
1479 XCHG->getCompareOperand()->getType(),
1481 }
else if (
auto CI = dyn_cast<CallInst>(
I)) {
1482 switch (CI->getIntrinsicID()) {
1483 case Intrinsic::masked_load:
1484 case Intrinsic::masked_store:
1485 case Intrinsic::masked_gather:
1486 case Intrinsic::masked_scatter: {
1487 bool IsWrite = CI->getType()->isVoidTy();
1489 unsigned OpOffset = IsWrite ? 1 : 0;
1493 auto BasePtr = CI->getOperand(OpOffset);
1494 if (ignoreAccess(
I, BasePtr))
1496 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1499 if (
auto *
Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1500 Alignment =
Op->getMaybeAlignValue();
1501 Value *
Mask = CI->getOperand(2 + OpOffset);
1502 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, Mask);
1505 case Intrinsic::masked_expandload:
1506 case Intrinsic::masked_compressstore: {
1507 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1508 unsigned OpOffset = IsWrite ? 1 : 0;
1511 auto BasePtr = CI->getOperand(OpOffset);
1512 if (ignoreAccess(
I, BasePtr))
1515 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1518 Value *
Mask = CI->getOperand(1 + OpOffset);
1521 Value *ExtMask =
IB.CreateZExt(Mask, ExtTy);
1522 Value *EVL =
IB.CreateAddReduce(ExtMask);
1523 Value *TrueMask = ConstantInt::get(
Mask->getType(), 1);
1524 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1528 case Intrinsic::vp_load:
1529 case Intrinsic::vp_store:
1530 case Intrinsic::experimental_vp_strided_load:
1531 case Intrinsic::experimental_vp_strided_store: {
1532 auto *VPI = cast<VPIntrinsic>(CI);
1533 unsigned IID = CI->getIntrinsicID();
1534 bool IsWrite = CI->getType()->isVoidTy();
1537 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1538 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1539 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*
DL);
1540 Value *Stride =
nullptr;
1541 if (IID == Intrinsic::experimental_vp_strided_store ||
1542 IID == Intrinsic::experimental_vp_strided_load) {
1543 Stride = VPI->getOperand(PtrOpNo + 1);
1548 if (!isa<ConstantInt>(Stride) ||
1549 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1550 Alignment =
Align(1);
1552 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1553 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1557 case Intrinsic::vp_gather:
1558 case Intrinsic::vp_scatter: {
1559 auto *VPI = cast<VPIntrinsic>(CI);
1560 unsigned IID = CI->getIntrinsicID();
1561 bool IsWrite = IID == Intrinsic::vp_scatter;
1564 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1565 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1566 MaybeAlign Alignment = VPI->getPointerAlignment();
1567 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1568 VPI->getMaskParam(),
1569 VPI->getVectorLengthParam());
1573 for (
unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1575 ignoreAccess(
I, CI->getArgOperand(ArgNo)))
1577 Type *Ty = CI->getParamByValType(ArgNo);
1585 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1592 if (
ICmpInst *Cmp = dyn_cast<ICmpInst>(
I)) {
1593 if (!Cmp->isRelational())
1607 if (BO->getOpcode() != Instruction::Sub)
1620 if (!
G->hasInitializer())
1623 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().IsDynInit)
1629void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1632 FunctionCallee F = isa<ICmpInst>(
I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1633 Value *
Param[2] = {
I->getOperand(0),
I->getOperand(1)};
1634 for (
Value *&i : Param) {
1635 if (i->getType()->isPointerTy())
1638 RTCI.createRuntimeCall(IRB,
F, Param);
1644 TypeSize TypeStoreSize,
bool IsWrite,
1645 Value *SizeArgument,
bool UseCalls,
1646 uint32_t Exp, RuntimeCallInserter &RTCI) {
1651 switch (FixedSize) {
1657 if (!Alignment || *Alignment >= Granularity ||
1658 *Alignment >= FixedSize / 8)
1659 return Pass->instrumentAddress(
I, InsertBefore,
Addr, Alignment,
1660 FixedSize, IsWrite,
nullptr, UseCalls,
1664 Pass->instrumentUnusualSizeOrAlignment(
I, InsertBefore,
Addr, TypeStoreSize,
1665 IsWrite,
nullptr, UseCalls, Exp, RTCI);
1668void AddressSanitizer::instrumentMaskedLoadOrStore(
1671 MaybeAlign Alignment,
unsigned Granularity,
Type *OpType,
bool IsWrite,
1673 RuntimeCallInserter &RTCI) {
1674 auto *VTy = cast<VectorType>(OpType);
1675 TypeSize ElemTypeSize =
DL.getTypeStoreSizeInBits(VTy->getScalarType());
1676 auto Zero = ConstantInt::get(IntptrTy, 0);
1684 Value *IsEVLZero =
IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1686 IB.SetInsertPoint(LoopInsertBefore);
1688 EVL =
IB.CreateZExtOrTrunc(EVL, IntptrTy);
1691 Value *
EC =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1692 EVL =
IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1694 EVL =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1699 Stride =
IB.CreateZExtOrTrunc(Stride, IntptrTy);
1703 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1704 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1705 if (MaskElemC->isZero())
1711 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1712 MaskElem, &*IRB.GetInsertPoint(), false);
1713 IRB.SetInsertPoint(ThenTerm);
1716 Value *InstrumentedAddress;
1717 if (isa<VectorType>(
Addr->getType())) {
1719 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1720 "Expected vector of pointer.");
1721 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1722 }
else if (Stride) {
1729 Alignment, Granularity, ElemTypeSize, IsWrite,
1730 SizeArgument, UseCalls, Exp, RTCI);
1737 RuntimeCallInserter &RTCI) {
1758 isSafeAccess(ObjSizeVis,
Addr,
O.TypeStoreSize)) {
1759 NumOptimizedAccessesToGlobalVar++;
1767 isSafeAccess(ObjSizeVis,
Addr,
O.TypeStoreSize)) {
1768 NumOptimizedAccessesToStackVar++;
1774 NumInstrumentedWrites++;
1776 NumInstrumentedReads++;
1778 unsigned Granularity = 1 << Mapping.Scale;
1780 instrumentMaskedLoadOrStore(
this,
DL, IntptrTy,
O.MaybeMask,
O.MaybeEVL,
1781 O.MaybeStride,
O.getInsn(),
Addr,
O.Alignment,
1782 Granularity,
O.OpType,
O.IsWrite,
nullptr,
1783 UseCalls, Exp, RTCI);
1786 Granularity,
O.TypeStoreSize,
O.IsWrite,
nullptr,
1787 UseCalls, Exp, RTCI);
1793 size_t AccessSizeIndex,
1794 Value *SizeArgument,
1796 RuntimeCallInserter &RTCI) {
1802 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1803 {
Addr, SizeArgument});
1805 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1806 {
Addr, SizeArgument, ExpVal});
1809 Call = RTCI.createRuntimeCall(
1810 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex],
Addr);
1812 Call = RTCI.createRuntimeCall(
1813 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {
Addr, ExpVal});
1816 Call->setCannotMerge();
1823 size_t Granularity =
static_cast<size_t>(1) << Mapping.Scale;
1825 Value *LastAccessedByte =
1826 IRB.
CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1828 if (TypeStoreSize / 8 > 1)
1830 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1838Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1840 uint32_t TypeStoreSize,
bool IsWrite,
Value *SizeArgument) {
1844 Type *PtrTy = cast<PointerType>(
Addr->getType()->getScalarType());
1847 return InsertBefore;
1852 Value *IsSharedOrPrivate = IRB.
CreateOr(IsShared, IsPrivate);
1854 Value *AddrSpaceZeroLanding =
1856 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1857 return InsertBefore;
1873 Trm->getParent()->setName(
"asan.report");
1884void AddressSanitizer::instrumentAddress(
Instruction *OrigIns,
1887 uint32_t TypeStoreSize,
bool IsWrite,
1888 Value *SizeArgument,
bool UseCalls,
1890 RuntimeCallInserter &RTCI) {
1891 if (TargetTriple.isAMDGPU()) {
1892 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore,
Addr,
1893 TypeStoreSize, IsWrite, SizeArgument);
1902 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1905 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1912 RTCI.createRuntimeCall(
1913 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1915 RTCI.createRuntimeCall(
1916 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1917 {AddrLong, ConstantInt::get(IRB.
getInt32Ty(), Exp)});
1924 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1926 std::max<uint64_t>(Alignment.
valueOrOne().
value() >> Mapping.Scale, 1);
1931 size_t Granularity = 1ULL << Mapping.Scale;
1934 bool GenSlowPath = (
ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1936 if (TargetTriple.isAMDGCN()) {
1938 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1941 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1942 }
else if (GenSlowPath) {
1947 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1950 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1965 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1974void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1976 TypeSize TypeStoreSize,
bool IsWrite,
Value *SizeArgument,
bool UseCalls,
1977 uint32_t Exp, RuntimeCallInserter &RTCI) {
1985 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1988 RTCI.createRuntimeCall(
1989 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2003void ModuleAddressSanitizer::poisonOneInitializer(
Function &GlobalInit) {
2009 Value *ModuleNameAddr =
2011 IRB.
CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2014 for (
auto &BB : GlobalInit)
2019void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2029 if (isa<ConstantAggregateZero>(
OP))
continue;
2035 auto *Priority = cast<ConstantInt>(CS->
getOperand(0));
2039 poisonOneInitializer(*
F);
2045ModuleAddressSanitizer::getExcludedAliasedGlobal(
const GlobalAlias &GA)
const {
2050 assert(CompileKernel &&
"Only expecting to be called when compiling kernel");
2057 return dyn_cast<GlobalVariable>(
C->stripPointerCastsAndAliases());
2062bool ModuleAddressSanitizer::shouldInstrumentGlobal(
GlobalVariable *
G)
const {
2063 Type *Ty =
G->getValueType();
2066 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().NoAddress)
2068 if (!Ty->
isSized())
return false;
2069 if (!
G->hasInitializer())
return false;
2071 if (
G->getAddressSpace() &&
2078 if (
G->isThreadLocal())
return false;
2080 if (
G->getAlign() && *
G->getAlign() > getMinRedzoneSizeForGlobal())
return false;
2086 if (!TargetTriple.isOSBinFormatCOFF()) {
2087 if (!
G->hasExactDefinition() ||
G->hasComdat())
2091 if (
G->isInterposable())
2095 if (
G->hasAvailableExternallyLinkage())
2102 switch (
C->getSelectionKind()) {
2113 if (
G->hasSection()) {
2123 if (Section ==
"llvm.metadata")
return false;
2130 if (
Section.starts_with(
".preinit_array") ||
2131 Section.starts_with(
".init_array") ||
2132 Section.starts_with(
".fini_array")) {
2138 if (TargetTriple.isOSBinFormatELF()) {
2140 [](
char c) {
return llvm::isAlnum(c) || c ==
'_'; }))
2152 if (TargetTriple.isOSBinFormatCOFF() &&
Section.contains(
'$')) {
2153 LLVM_DEBUG(
dbgs() <<
"Ignoring global in sorted section (contains '$'): "
2158 if (TargetTriple.isOSBinFormatMachO()) {
2160 unsigned TAA = 0, StubSize = 0;
2163 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2168 if (ParsedSegment ==
"__OBJC" ||
2169 (ParsedSegment ==
"__DATA" && ParsedSection.
starts_with(
"__objc_"))) {
2181 if (ParsedSegment ==
"__DATA" && ParsedSection ==
"__cfstring") {
2194 if (CompileKernel) {
2197 if (
G->getName().starts_with(
"__"))
2207bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection()
const {
2208 if (!TargetTriple.isOSBinFormatMachO())
2211 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2213 if (TargetTriple.isiOS() && !TargetTriple.isOSVersionLT(9))
2215 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2217 if (TargetTriple.isDriverKit())
2219 if (TargetTriple.isXROS())
2225StringRef ModuleAddressSanitizer::getGlobalMetadataSection()
const {
2226 switch (TargetTriple.getObjectFormat()) {
2236 "ModuleAddressSanitizer not implemented for object file format");
2243void ModuleAddressSanitizer::initializeCallbacks() {
2249 AsanUnpoisonGlobals =
2253 AsanRegisterGlobals =
M.getOrInsertFunction(
2255 AsanUnregisterGlobals =
M.getOrInsertFunction(
2260 AsanRegisterImageGlobals =
M.getOrInsertFunction(
2262 AsanUnregisterImageGlobals =
M.getOrInsertFunction(
2265 AsanRegisterElfGlobals =
2267 IntptrTy, IntptrTy, IntptrTy);
2268 AsanUnregisterElfGlobals =
2270 IntptrTy, IntptrTy, IntptrTy);
2275void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2280 if (!
G->hasName()) {
2284 G->setName(
genName(
"anon_global"));
2287 if (!InternalSuffix.
empty() &&
G->hasLocalLinkage()) {
2288 std::string
Name = std::string(
G->getName());
2289 Name += InternalSuffix;
2290 C =
M.getOrInsertComdat(
Name);
2292 C =
M.getOrInsertComdat(
G->getName());
2298 if (TargetTriple.isOSBinFormatCOFF()) {
2300 if (
G->hasPrivateLinkage())
2313ModuleAddressSanitizer::CreateMetadataGlobal(
Constant *Initializer,
2315 auto Linkage = TargetTriple.isOSBinFormatMachO()
2319 M, Initializer->
getType(),
false, Linkage, Initializer,
2321 Metadata->setSection(getGlobalMetadataSection());
2328Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2332 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2340void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2344 auto &
DL =
M.getDataLayout();
2347 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2348 Constant *Initializer = MetadataInitializers[i];
2352 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2358 unsigned SizeOfGlobalStruct =
DL.getTypeAllocSize(Initializer->
getType());
2360 "global metadata will not be padded appropriately");
2363 SetComdatForGlobalMetadata(
G,
Metadata,
"");
2368 if (!MetadataGlobals.empty())
2372void ModuleAddressSanitizer::instrumentGlobalsELF(
2375 const std::string &UniqueModuleId) {
2382 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2385 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2388 CreateMetadataGlobal(MetadataInitializers[i],
G->getName());
2390 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2393 if (UseComdatForGlobalsGC)
2394 SetComdatForGlobalMetadata(
G,
Metadata, UniqueModuleId);
2399 if (!MetadataGlobals.empty())
2416 "__start_" + getGlobalMetadataSection());
2420 "__stop_" + getGlobalMetadataSection());
2434 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2441void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2452 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2453 Constant *Initializer = MetadataInitializers[i];
2459 auto LivenessBinder =
2464 Twine(
"__asan_binder_") +
G->getName());
2465 Liveness->
setSection(
"__DATA,__asan_liveness,regular,live_support");
2466 LivenessGlobals[i] = Liveness;
2473 if (!LivenessGlobals.empty())
2495 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2500void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2504 unsigned N = ExtendedGlobals.
size();
2514 if (Mapping.Scale > 3)
2515 AllGlobals->setAlignment(
Align(1ULL << Mapping.Scale));
2520 ConstantInt::get(IntptrTy,
N)});
2526 IrbDtor.CreateCall(AsanUnregisterGlobals,
2528 ConstantInt::get(IntptrTy,
N)});
2537void ModuleAddressSanitizer::instrumentGlobals(
IRBuilder<> &IRB,
2542 if (CompileKernel) {
2543 for (
auto &GA :
M.aliases()) {
2545 AliasedGlobalExclusions.
insert(GV);
2550 for (
auto &
G :
M.globals()) {
2551 if (!AliasedGlobalExclusions.
count(&
G) && shouldInstrumentGlobal(&
G))
2555 size_t n = GlobalsToChange.
size();
2556 auto &
DL =
M.getDataLayout();
2570 IntptrTy, IntptrTy, IntptrTy);
2574 for (
size_t i = 0; i < n; i++) {
2578 if (
G->hasSanitizerMetadata())
2579 MD =
G->getSanitizerMetadata();
2584 std::string NameForGlobal =
G->getName().str();
2589 Type *Ty =
G->getValueType();
2590 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
2603 M, NewTy,
G->isConstant(), Linkage, NewInitializer,
"",
G,
2604 G->getThreadLocalMode(),
G->getAddressSpace());
2614 if (TargetTriple.isOSBinFormatMachO() && !
G->hasSection() &&
2616 auto Seq = dyn_cast<ConstantDataSequential>(
G->getInitializer());
2617 if (Seq && Seq->isCString())
2618 NewGlobal->
setSection(
"__TEXT,__asan_cstring,regular");
2629 G->replaceAllUsesWith(
2632 G->eraseFromParent();
2633 NewGlobals[i] = NewGlobal;
2638 bool CanUsePrivateAliases =
2639 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2640 TargetTriple.isOSBinFormatWasm();
2641 if (CanUsePrivateAliases && UsePrivateAlias) {
2644 InstrumentedGlobal =
2652 }
else if (UseOdrIndicator) {
2655 auto *ODRIndicatorSym =
2664 ODRIndicatorSym->setAlignment(
Align(1));
2665 ODRIndicator = ODRIndicatorSym;
2671 ConstantInt::get(IntptrTy, SizeInBytes),
2672 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2675 ConstantInt::get(IntptrTy, MD.
IsDynInit),
2681 Initializers[i] = Initializer;
2687 for (
size_t i = 0; i < n; i++) {
2689 if (
G->getName().empty())
continue;
2694 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2701 }
else if (n == 0) {
2704 *CtorComdat = TargetTriple.isOSBinFormatELF();
2706 *CtorComdat =
false;
2707 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2708 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2709 }
else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2710 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2712 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2718 createInitializerPoisonCalls();
2724ModuleAddressSanitizer::getRedzoneSizeForGlobal(
uint64_t SizeInBytes)
const {
2725 constexpr uint64_t kMaxRZ = 1 << 18;
2726 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2729 if (SizeInBytes <= MinRZ / 2) {
2733 RZ = MinRZ - SizeInBytes;
2736 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2739 if (SizeInBytes % MinRZ)
2740 RZ += MinRZ - (SizeInBytes % MinRZ);
2743 assert((RZ + SizeInBytes) % MinRZ == 0);
2748int ModuleAddressSanitizer::GetAsanVersion()
const {
2749 int LongSize =
M.getDataLayout().getPointerSizeInBits();
2750 bool isAndroid =
M.getTargetTriple().isAndroid();
2754 Version += (LongSize == 32 && isAndroid);
2769bool ModuleAddressSanitizer::instrumentModule() {
2770 initializeCallbacks();
2778 if (CompileKernel) {
2783 std::string AsanVersion = std::to_string(GetAsanVersion());
2784 std::string VersionCheckName =
2786 std::tie(AsanCtorFunction, std::ignore) =
2789 {}, VersionCheckName);
2793 bool CtorComdat =
true;
2796 if (AsanCtorFunction) {
2797 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2798 instrumentGlobals(IRB, &CtorComdat);
2801 instrumentGlobals(IRB, &CtorComdat);
2810 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2811 if (AsanCtorFunction) {
2815 if (AsanDtorFunction) {
2820 if (AsanCtorFunction)
2822 if (AsanDtorFunction)
2833 for (
int Exp = 0;
Exp < 2;
Exp++) {
2834 for (
size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2835 const std::string TypeStr = AccessIsWrite ?
"store" :
"load";
2836 const std::string ExpStr =
Exp ?
"exp_" :
"";
2837 const std::string EndingStr = Recover ?
"_noabort" :
"";
2846 Args1.push_back(ExpType);
2847 if (
auto AK = TLI->getExtAttrForI32Param(
false)) {
2852 AsanErrorCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2856 AsanMemoryAccessCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2861 AccessSizeIndex++) {
2862 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2863 AsanErrorCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2864 M.getOrInsertFunction(
2868 AsanMemoryAccessCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2869 M.getOrInsertFunction(
2876 const std::string MemIntrinCallbackPrefix =
2880 AsanMemmove =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memmove",
2881 PtrTy, PtrTy, PtrTy, IntptrTy);
2882 AsanMemcpy =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memcpy", PtrTy,
2883 PtrTy, PtrTy, IntptrTy);
2884 AsanMemset =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memset",
2888 AsanHandleNoReturnFunc =
2891 AsanPtrCmpFunction =
2893 AsanPtrSubFunction =
2895 if (Mapping.InGlobal)
2896 AsanShadowGlobal =
M.getOrInsertGlobal(
"__asan_shadow",
2899 AMDGPUAddressShared =
2901 AMDGPUAddressPrivate =
2905bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(
Function &
F) {
2913 if (
F.getName().contains(
" load]")) {
2923bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(
Function &
F) {
2929 if (Mapping.InGlobal) {
2937 LocalDynamicShadow =
2938 IRB.
CreateCall(Asm, {AsanShadowGlobal},
".asan.shadow");
2940 LocalDynamicShadow =
2944 Value *GlobalDynamicAddress =
F.getParent()->getOrInsertGlobal(
2946 LocalDynamicShadow = IRB.
CreateLoad(IntptrTy, GlobalDynamicAddress);
2951void AddressSanitizer::markEscapedLocalAllocas(
Function &
F) {
2956 assert(ProcessedAllocas.empty() &&
"must process localescape before allocas");
2960 if (!
F.getParent()->getFunction(
"llvm.localescape"))
return;
2966 if (
II &&
II->getIntrinsicID() == Intrinsic::localescape) {
2968 for (
Value *Arg :
II->args()) {
2969 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2971 "non-static alloca arg to localescape");
2972 ProcessedAllocas[AI] =
false;
2979bool AddressSanitizer::suppressInstrumentationSiteForDebug(
int &Instrumented) {
2980 bool ShouldInstrument =
2984 return !ShouldInstrument;
2987bool AddressSanitizer::instrumentFunction(
Function &
F,
2989 bool FunctionModified =
false;
2992 if (
F.hasFnAttribute(Attribute::Naked))
2993 return FunctionModified;
2998 if (maybeInsertAsanInitAtFunctionEntry(
F))
2999 FunctionModified =
true;
3002 if (!
F.hasFnAttribute(Attribute::SanitizeAddress))
return FunctionModified;
3004 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3005 return FunctionModified;
3009 initializeCallbacks(TLI);
3011 FunctionStateRAII CleanupObj(
this);
3013 RuntimeCallInserter RTCI(
F);
3015 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(
F);
3019 markEscapedLocalAllocas(
F);
3031 for (
auto &BB :
F) {
3033 TempsToInstrument.
clear();
3034 int NumInsnsPerBB = 0;
3035 for (
auto &Inst : BB) {
3036 if (LooksLikeCodeInBug11395(&Inst))
return false;
3043 if (!InterestingOperands.
empty()) {
3044 for (
auto &Operand : InterestingOperands) {
3050 if (Operand.MaybeMask) {
3054 if (!TempsToInstrument.
insert(
Ptr).second)
3058 OperandsToInstrument.
push_back(Operand);
3065 PointerComparisonsOrSubtracts.
push_back(&Inst);
3071 if (
auto *CB = dyn_cast<CallBase>(&Inst)) {
3073 TempsToInstrument.
clear();
3077 if (
CallInst *CI = dyn_cast<CallInst>(&Inst))
3084 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3085 OperandsToInstrument.
size() + IntrinToInstrument.
size() >
3086 (
unsigned)InstrumentationWithCallsThreshold);
3091 int NumInstrumented = 0;
3092 for (
auto &Operand : OperandsToInstrument) {
3093 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3094 instrumentMop(ObjSizeVis, Operand, UseCalls,
3095 F.getDataLayout(), RTCI);
3096 FunctionModified =
true;
3098 for (
auto *Inst : IntrinToInstrument) {
3099 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3100 instrumentMemIntrinsic(Inst, RTCI);
3101 FunctionModified =
true;
3104 FunctionStackPoisoner FSP(
F, *
this, RTCI);
3105 bool ChangedStack = FSP.runOnFunction();
3109 for (
auto *CI : NoReturnCalls) {
3111 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3114 for (
auto *Inst : PointerComparisonsOrSubtracts) {
3115 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3116 FunctionModified =
true;
3119 if (ChangedStack || !NoReturnCalls.empty())
3120 FunctionModified =
true;
3122 LLVM_DEBUG(
dbgs() <<
"ASAN done instrumenting: " << FunctionModified <<
" "
3125 return FunctionModified;
3131bool AddressSanitizer::LooksLikeCodeInBug11395(
Instruction *
I) {
3132 if (LongSize != 32)
return false;
3141void FunctionStackPoisoner::initializeCallbacks(
Module &M) {
3145 const char *MallocNameTemplate =
3150 std::string Suffix = itostr(Index);
3151 AsanStackMallocFunc[
Index] =
M.getOrInsertFunction(
3152 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3153 AsanStackFreeFunc[
Index] =
3158 if (ASan.UseAfterScope) {
3159 AsanPoisonStackMemoryFunc =
M.getOrInsertFunction(
3161 AsanUnpoisonStackMemoryFunc =
M.getOrInsertFunction(
3165 for (
size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3166 0xf3, 0xf5, 0xf8}) {
3167 std::ostringstream
Name;
3169 Name << std::setw(2) << std::setfill(
'0') << std::hex << Val;
3170 AsanSetShadowFunc[Val] =
3171 M.getOrInsertFunction(
Name.str(), IRB.
getVoidTy(), IntptrTy, IntptrTy);
3174 AsanAllocaPoisonFunc =
M.getOrInsertFunction(
3176 AsanAllocasUnpoisonFunc =
M.getOrInsertFunction(
3182 size_t Begin,
size_t End,
3184 Value *ShadowBase) {
3188 const size_t LargestStoreSizeInBytes =
3189 std::min<size_t>(
sizeof(
uint64_t), ASan.LongSize / 8);
3191 const bool IsLittleEndian =
F.getDataLayout().isLittleEndian();
3197 for (
size_t i = Begin; i <
End;) {
3198 if (!ShadowMask[i]) {
3204 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3206 while (StoreSizeInBytes >
End - i)
3207 StoreSizeInBytes /= 2;
3210 for (
size_t j = StoreSizeInBytes - 1;
j && !ShadowMask[i +
j]; --
j) {
3211 while (j <= StoreSizeInBytes / 2)
3212 StoreSizeInBytes /= 2;
3216 for (
size_t j = 0;
j < StoreSizeInBytes;
j++) {
3218 Val |= (
uint64_t)ShadowBytes[i + j] << (8 * j);
3220 Val = (Val << 8) | ShadowBytes[i + j];
3229 i += StoreSizeInBytes;
3236 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.
size(), IRB, ShadowBase);
3241 size_t Begin,
size_t End,
3244 size_t Done = Begin;
3245 for (
size_t i = Begin, j = Begin + 1; i <
End; i =
j++) {
3246 if (!ShadowMask[i]) {
3251 if (!AsanSetShadowFunc[Val])
3255 for (;
j <
End && ShadowMask[
j] && Val == ShadowBytes[
j]; ++
j) {
3258 if (j - i >= ASan.MaxInlinePoisoningSize) {
3259 copyToShadowInline(ShadowMask, ShadowBytes,
Done, i, IRB, ShadowBase);
3260 RTCI.createRuntimeCall(
3261 IRB, AsanSetShadowFunc[Val],
3262 {IRB.
CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3263 ConstantInt::get(IntptrTy, j - i)});
3268 copyToShadowInline(ShadowMask, ShadowBytes,
Done,
End, IRB, ShadowBase);
3276 for (
int i = 0;; i++, MaxSize *= 2)
3277 if (LocalStackSize <= MaxSize)
return i;
3281void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3283 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3291 if (Arg.hasByValAttr()) {
3292 Type *Ty = Arg.getParamByValType();
3293 const Align Alignment =
3294 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3298 (Arg.hasName() ? Arg.getName() :
"Arg" +
Twine(Arg.getArgNo())) +
3301 Arg.replaceAllUsesWith(AI);
3303 uint64_t AllocSize =
DL.getTypeAllocSize(Ty);
3304 IRB.
CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3312 Value *ValueIfFalse) {
3315 PHI->addIncoming(ValueIfFalse, CondBlock);
3317 PHI->addIncoming(ValueIfTrue, ThenBlock);
3321Value *FunctionStackPoisoner::createAllocaForLayout(
3330 nullptr,
"MyAlloca");
3339void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3342 DynamicAllocaLayout = IRB.
CreateAlloca(IntptrTy,
nullptr);
3347void FunctionStackPoisoner::processDynamicAllocas() {
3354 for (
const auto &APC : DynamicAllocaPoisonCallVec) {
3357 assert(ASan.isInterestingAlloca(*APC.AI));
3358 assert(!APC.AI->isStaticAlloca());
3361 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3368 createDynamicAllocasInitStorage();
3369 for (
auto &AI : DynamicAllocaVec)
3370 handleDynamicAllocaCall(AI);
3371 unpoisonDynamicAllocas();
3383 for (
Instruction *It = Start; It; It = It->getNextNode()) {
3393 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3395 if (
auto *Store = dyn_cast<StoreInst>(It)) {
3399 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3400 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3403 Value *Val = Store->getValueOperand();
3404 bool IsDirectArgInit = isa<Argument>(Val);
3405 bool IsArgInitViaCast =
3406 isa<CastInst>(Val) &&
3407 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3410 Val == It->getPrevNode();
3411 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3415 if (IsArgInitViaCast)
3416 InitInsts.
push_back(cast<Instruction>(Val));
3430 if (AI->
hasMetadata(LLVMContext::MD_annotation)) {
3432 cast<MDTuple>(AI->
getMetadata(LLVMContext::MD_annotation));
3433 for (
auto &Annotation : AllocaAnnotations->
operands()) {
3434 if (!isa<MDTuple>(Annotation))
3436 auto AnnotationTuple = cast<MDTuple>(Annotation);
3437 for (
unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3440 auto MetadataString =
3441 cast<MDString>(AnnotationTuple->getOperand(Index));
3442 if (MetadataString->getString() ==
"alloca_name_altered")
3443 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3451void FunctionStackPoisoner::processStaticAllocas() {
3452 if (AllocaVec.
empty()) {
3457 int StackMallocIdx = -1;
3459 if (
auto SP =
F.getSubprogram())
3460 EntryDebugLocation =
3469 auto InsBeforeB = InsBefore->
getParent();
3470 assert(InsBeforeB == &
F.getEntryBlock());
3471 for (
auto *AI : StaticAllocasToMoveUp)
3482 ArgInitInst->moveBefore(InsBefore->
getIterator());
3485 if (LocalEscapeCall)
3493 ASan.getAllocaSizeInBytes(*AI),
3504 uint64_t Granularity = 1ULL << Mapping.Scale;
3505 uint64_t MinHeaderSize = std::max((
uint64_t)ASan.LongSize / 2, Granularity);
3511 for (
auto &
Desc : SVD)
3515 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3518 assert(ASan.isInterestingAlloca(*APC.AI));
3519 assert(APC.AI->isStaticAlloca());
3524 if (
const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3525 if (LifetimeLoc->getFile() == FnLoc->getFile())
3526 if (
unsigned Line = LifetimeLoc->getLine())
3527 Desc.Line = std::min(
Desc.Line ?
Desc.Line : Line, Line);
3533 LLVM_DEBUG(
dbgs() << DescriptionString <<
" --- " <<
L.FrameSize <<
"\n");
3535 bool DoStackMalloc =
3545 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3546 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3548 Value *StaticAlloca =
3549 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L,
false);
3552 Value *LocalStackBase;
3553 Value *LocalStackBaseAlloca;
3556 if (DoStackMalloc) {
3557 LocalStackBaseAlloca =
3558 IRB.
CreateAlloca(IntptrTy,
nullptr,
"asan_local_stack_base");
3565 Constant *OptionDetectUseAfterReturn =
F.getParent()->getOrInsertGlobal(
3575 Value *FakeStackValue =
3576 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3577 ConstantInt::get(IntptrTy, LocalStackSize));
3579 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3580 ConstantInt::get(IntptrTy, 0));
3588 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3589 ConstantInt::get(IntptrTy, LocalStackSize));
3591 Value *NoFakeStack =
3596 Value *AllocaValue =
3597 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L,
true) : StaticAlloca;
3600 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3601 IRB.
CreateStore(LocalStackBase, LocalStackBaseAlloca);
3606 FakeStack = ConstantInt::get(IntptrTy, 0);
3608 DoDynamicAlloca ? createAllocaForLayout(IRB, L,
true) : StaticAlloca;
3609 LocalStackBaseAlloca = LocalStackBase;
3615 Value *LocalStackBaseAllocaPtr =
3616 isa<PtrToIntInst>(LocalStackBaseAlloca)
3617 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3618 : LocalStackBaseAlloca;
3619 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3620 "Variable descriptions relative to ASan stack base will be dropped");
3624 for (
const auto &
Desc : SVD) {
3629 IRB.
CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
Desc.Offset)),
3643 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3653 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3660 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3663 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3665 if (!StaticAllocaPoisonCallVec.empty()) {
3669 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3672 size_t Begin =
Desc.Offset /
L.Granularity;
3673 size_t End = Begin + (APC.Size +
L.Granularity - 1) /
L.Granularity;
3676 copyToShadow(ShadowAfterScope,
3677 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin,
End,
3683 for (
Value *NewAllocaPtr : NewAllocaPtrs) {
3685 auto *
I = cast<Instruction>(U);
3686 if (
I->isLifetimeStartOrEnd())
3687 I->eraseFromParent();
3700 if (DoStackMalloc) {
3701 assert(StackMallocIdx >= 0);
3718 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3720 ShadowAfterReturn.
resize(ClassSize /
L.Granularity,
3722 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3724 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3726 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3727 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3728 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3729 IRBPoison.CreateStore(
3731 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3734 RTCI.createRuntimeCall(
3735 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3736 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3740 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3742 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3747 for (
auto *AI : AllocaVec)
3755 Value *SizeArg = ConstantInt::get(IntptrTy,
Size);
3756 RTCI.createRuntimeCall(
3757 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3758 {AddrArg, SizeArg});
3769void FunctionStackPoisoner::handleDynamicAllocaCall(
AllocaInst *AI) {
3777 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3783 const unsigned ElementSize =
3787 ConstantInt::get(IntptrTy, ElementSize));
3815 ConstantInt::get(IntptrTy, Alignment.
value()));
3818 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3828 auto *
I = cast<Instruction>(U);
3829 if (
I->isLifetimeStartOrEnd())
3830 I->eraseFromParent();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This defines the Use class.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
@ SameSize
The data referenced by the COMDAT must be the same size.
@ Any
The linker may choose any COMDAT.
@ NoDeduplicate
No deduplication is performed.
@ ExactMatch
The data referenced by the COMDAT must be the same.
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const Constant * getAliasee() const
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ CommonLinkage
Tentative definitions.
@ InternalLinkage
Rename collisions when linking (static functions).
@ AvailableExternallyLinkage
Available for inspection, not emission.
@ ExternalWeakLinkage
ExternalWeak linkage description.
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Base class for instruction visitors.
RetTy visitCallBase(CallBase &I)
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
RetTy visitIntrinsicInst(IntrinsicInst &I)
void visit(Iterator Start, Iterator End)
RetTy visitReturnInst(ReturnInst &I)
RetTy visitAllocaInst(AllocaInst &I)
RetTy visitResumeInst(ResumeInst &I)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
ArrayRef< MDOperand > operands() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This is the common base class for memset/memcpy/memmove.
A Module instance is used to store all the information related to an LLVM module.
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Pass interface - Implemented by all 'passes'.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Triple - Helper class for working with autoconf configuration names.
bool isAndroidVersionLT(unsigned Major) const
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
bool isDriverKit() const
Is this an Apple DriverKit triple.
bool isAndroid() const
Tests whether the target is Android.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isOSWindows() const
Tests whether the OS is Windows.
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isOSLinux() const
Tests whether the OS is Linux.
bool isMacOSX() const
Is this a Mac OS X triple.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
bool isWatchOS() const
Is this an Apple watchOS triple.
bool isiOS() const
Is this an iOS triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isOSHaiku() const
Tests whether the OS is Haiku.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Linkage
Describes symbol linkage. This can be used to resolve definition clashes.
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
@ Dynamic
Denotes mode unknown at compile time.
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
LLVM_ABI ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
int InstrumentationWithCallsThreshold
uint32_t MaxInlinePoisoningSize
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.