LLVM 22.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/MD5.h"
66#include <optional>
67#include <random>
68
69using namespace llvm;
70
71#define DEBUG_TYPE "hwasan"
72
73const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
74const char kHwasanNoteName[] = "hwasan.note";
75const char kHwasanInitName[] = "__hwasan_init";
76const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
77
79 "__hwasan_shadow_memory_dynamic_address";
80
81// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
82static const size_t kNumberOfAccessSizes = 5;
83
84static const size_t kDefaultShadowScale = 4;
85
86static const unsigned kShadowBaseAlignment = 32;
87
namespace {
/// How the shadow base address is obtained at runtime (see the ShadowMapping
/// documentation below for the exact address computation in each mode).
enum class OffsetKind {
  kFixed = 0, // Constant offset known at compile time.
  kGlobal,    // Loaded from __hwasan_shadow_memory_dynamic_address.
  kIfunc,     // Address of the __hwasan_shadow ifunc global.
  kTls,       // Derived from the __hwasan_tls thread-local slot.
};
} // namespace
96
98 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
99 cl::desc("Prefix for memory access callbacks"),
100 cl::Hidden, cl::init("__hwasan_"));
101
103 "hwasan-kernel-mem-intrinsic-prefix",
104 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
105 cl::init(false));
106
108 "hwasan-instrument-with-calls",
109 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
110 cl::init(false));
111
112static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
113 cl::desc("instrument read instructions"),
114 cl::Hidden, cl::init(true));
115
116static cl::opt<bool>
117 ClInstrumentWrites("hwasan-instrument-writes",
118 cl::desc("instrument write instructions"), cl::Hidden,
119 cl::init(true));
120
122 "hwasan-instrument-atomics",
123 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
124 cl::init(true));
125
126static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
127 cl::desc("instrument byval arguments"),
128 cl::Hidden, cl::init(true));
129
130static cl::opt<bool>
131 ClRecover("hwasan-recover",
132 cl::desc("Enable recovery mode (continue-after-error)."),
133 cl::Hidden, cl::init(false));
134
135static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
136 cl::desc("instrument stack (allocas)"),
137 cl::Hidden, cl::init(true));
138
139static cl::opt<bool>
140 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
141 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
143
145 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
147 cl::desc("How many lifetime ends to handle for a single alloca."),
149
150static cl::opt<bool>
151 ClUseAfterScope("hwasan-use-after-scope",
152 cl::desc("detect use after scope within function"),
153 cl::Hidden, cl::init(true));
154
156 "hwasan-generate-tags-with-calls",
157 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
158 cl::init(false));
159
160static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
161 cl::Hidden, cl::init(false));
162
164 "hwasan-all-globals",
165 cl::desc(
166 "Instrument globals, even those within user-defined sections. Warning: "
167 "This may break existing code which walks globals via linker-generated "
168 "symbols, expects certain globals to be contiguous with each other, or "
169 "makes other assumptions which are invalidated by HWASan "
170 "instrumentation."),
171 cl::Hidden, cl::init(false));
172
174 "hwasan-match-all-tag",
175 cl::desc("don't report bad accesses via pointers with this tag"),
176 cl::Hidden, cl::init(-1));
177
178static cl::opt<bool>
179 ClEnableKhwasan("hwasan-kernel",
180 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
181 cl::Hidden, cl::init(false));
182
183// These flags allow to change the shadow mapping and control how shadow memory
184// is accessed. The shadow mapping looks like:
185// Shadow = (Mem >> scale) + offset
186
188 ClMappingOffset("hwasan-mapping-offset",
189 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
190 cl::Hidden);
191
193 "hwasan-mapping-offset-dynamic",
194 cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
195 cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
196 clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
197 clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));
198
199static cl::opt<bool>
200 ClFrameRecords("hwasan-with-frame-record",
201 cl::desc("Use ring buffer for stack allocations"),
202 cl::Hidden);
203
204static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
205 cl::desc("Hot percentile cutoff."));
206
207static cl::opt<float>
208 ClRandomKeepRate("hwasan-random-rate",
209 cl::desc("Probability value in the range [0.0, 1.0] "
210 "to keep instrumentation of a function. "
211 "Note: instrumentation can be skipped randomly "
212 "OR because of the hot percentile cutoff, if "
213 "both are supplied."));
214
216 "hwasan-static-linking",
217 cl::desc("Don't use .note.hwasan.globals section to instrument globals "
218 "from loadable libraries. "
219 "Note: in static binaries, the global variables section can be "
220 "accessed directly via linker-provided "
221 "__start_hwasan_globals and __stop_hwasan_globals symbols"),
222 cl::Hidden, cl::init(false));
223
224STATISTIC(NumTotalFuncs, "Number of total funcs");
225STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
226STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
227
228// Mode for selecting how to insert frame record info into the stack ring
229// buffer.
231 // Do not record frame record info.
233
234 // Insert instructions into the prologue for storing into the stack ring
235 // buffer directly.
237
238 // Add a call to __hwasan_add_frame_record in the runtime.
240};
241
243 "hwasan-record-stack-history",
244 cl::desc("Record stack frames with tagged allocations in a thread-local "
245 "ring buffer"),
246 cl::values(clEnumVal(none, "Do not record stack ring history"),
247 clEnumVal(instr, "Insert instructions into the prologue for "
248 "storing into the stack ring buffer directly"),
249 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
250 "storing into the stack ring buffer")),
252
253static cl::opt<bool>
254 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
255 cl::desc("instrument memory intrinsics"),
256 cl::Hidden, cl::init(true));
257
258static cl::opt<bool>
259 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
260 cl::desc("instrument landing pads"), cl::Hidden,
261 cl::init(false));
262
264 "hwasan-use-short-granules",
265 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
266 cl::init(false));
267
269 "hwasan-instrument-personality-functions",
270 cl::desc("instrument personality functions"), cl::Hidden);
271
272static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
273 cl::desc("inline all checks"),
274 cl::Hidden, cl::init(false));
275
276static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
277 cl::desc("inline all checks"),
278 cl::Hidden, cl::init(false));
279
280// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
281static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
282 cl::desc("Use page aliasing in HWASan"),
283 cl::Hidden, cl::init(false));
284
285namespace {
286
287template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
288 return Opt.getNumOccurrences() ? Opt : Other;
289}
290
291bool shouldUsePageAliases(const Triple &TargetTriple) {
292 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
293}
294
295bool shouldInstrumentStack(const Triple &TargetTriple) {
296 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
297}
298
299bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
300 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
301}
302
303bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
304 return optOr(ClUseStackSafety, !DisableOptimization);
305}
306
307bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
308 bool DisableOptimization) {
309 return shouldInstrumentStack(TargetTriple) &&
310 mightUseStackSafetyAnalysis(DisableOptimization);
311}
312
313bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
314 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
315}
316
317/// An instrumentation pass implementing detection of addressability bugs
318/// using tagged pointers.
319class HWAddressSanitizer {
320public:
321 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
322 const StackSafetyGlobalInfo *SSI)
323 : M(M), SSI(SSI) {
324 this->Recover = optOr(ClRecover, Recover);
325 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
326 this->Rng = ClRandomKeepRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
327 : nullptr;
328
329 initializeModule();
330 }
331
332 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
333
334private:
335 struct ShadowTagCheckInfo {
336 Instruction *TagMismatchTerm = nullptr;
337 Value *PtrLong = nullptr;
338 Value *AddrLong = nullptr;
339 Value *PtrTag = nullptr;
340 Value *MemTag = nullptr;
341 };
342
343 bool selectiveInstrumentationShouldSkip(Function &F,
345 void initializeModule();
346 void createHwasanCtorComdat();
347 void createHwasanNote();
348
349 void initializeCallbacks(Module &M);
350
351 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
352
353 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
354 Value *getShadowNonTls(IRBuilder<> &IRB);
355
356 void untagPointerOperand(Instruction *I, Value *Addr);
357 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
358
359 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
360 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
361 DomTreeUpdater &DTU, LoopInfo *LI);
362 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
363 unsigned AccessSizeIndex,
364 Instruction *InsertBefore,
365 DomTreeUpdater &DTU, LoopInfo *LI);
366 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
367 unsigned AccessSizeIndex,
368 Instruction *InsertBefore, DomTreeUpdater &DTU,
369 LoopInfo *LI);
370 bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
371 void instrumentMemIntrinsic(MemIntrinsic *MI);
372 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
373 LoopInfo *LI, const DataLayout &DL);
374 bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
375 bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
376 Value *Ptr);
377
380 const TargetLibraryInfo &TLI,
382
383 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
384 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
385 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
386 void instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
387 const DominatorTree &DT, const PostDominatorTree &PDT,
388 const LoopInfo &LI);
389 void instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
390 Value *getNextTagWithCall(IRBuilder<> &IRB);
391 Value *getStackBaseTag(IRBuilder<> &IRB);
392 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
393 Value *getUARTag(IRBuilder<> &IRB);
394
395 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
396 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
397 unsigned retagMask(unsigned AllocaNo);
398
399 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
400
401 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
402 void instrumentGlobals();
403
404 Value *getCachedFP(IRBuilder<> &IRB);
405 Value *getFrameRecordInfo(IRBuilder<> &IRB);
406
407 void instrumentPersonalityFunctions();
408
409 LLVMContext *C;
410 Module &M;
411 const StackSafetyGlobalInfo *SSI;
412 Triple TargetTriple;
413 std::unique_ptr<RandomNumberGenerator> Rng;
414
415 /// This struct defines the shadow mapping using the rule:
416 /// If `kFixed`, then
417 /// shadow = (mem >> Scale) + Offset.
418 /// If `kGlobal`, then
419 /// extern char* __hwasan_shadow_memory_dynamic_address;
420 /// shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
421 /// If `kIfunc`, then
422 /// extern char __hwasan_shadow[];
423 /// shadow = (mem >> Scale) + &__hwasan_shadow
424 /// If `kTls`, then
425 /// extern char *__hwasan_tls;
426 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
427 ///
428 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
429 /// ring buffer for storing stack allocations on targets that support it.
430 class ShadowMapping {
433 uint8_t Scale;
434 bool WithFrameRecord;
435
436 void SetFixed(uint64_t O) {
437 Kind = OffsetKind::kFixed;
438 Offset = O;
439 }
440
441 public:
442 void init(Triple &TargetTriple, bool InstrumentWithCalls,
443 bool CompileKernel);
444 Align getObjectAlignment() const { return Align(1ULL << Scale); }
445 bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
446 bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
447 bool isInTls() const { return Kind == OffsetKind::kTls; }
448 bool isFixed() const { return Kind == OffsetKind::kFixed; }
449 uint8_t scale() const { return Scale; };
450 uint64_t offset() const {
451 assert(isFixed());
452 return Offset;
453 };
454 bool withFrameRecord() const { return WithFrameRecord; };
455 };
456
457 ShadowMapping Mapping;
458
459 Type *VoidTy = Type::getVoidTy(M.getContext());
460 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
461 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
462 Type *Int8Ty = Type::getInt8Ty(M.getContext());
463 Type *Int32Ty = Type::getInt32Ty(M.getContext());
464 Type *Int64Ty = Type::getInt64Ty(M.getContext());
465
466 bool CompileKernel;
467 bool Recover;
468 bool OutlinedChecks;
469 bool InlineFastPath;
470 bool UseShortGranules;
471 bool InstrumentLandingPads;
472 bool InstrumentWithCalls;
473 bool InstrumentStack;
474 bool InstrumentGlobals;
475 bool DetectUseAfterScope;
476 bool UsePageAliases;
477 bool UseMatchAllCallback;
478
479 std::optional<uint8_t> MatchAllTag;
480
481 unsigned PointerTagShift;
482 uint64_t TagMaskByte;
483
484 Function *HwasanCtorFunction;
485
486 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
487 FunctionCallee HwasanMemoryAccessCallbackSized[2];
488
489 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
490 FunctionCallee HwasanHandleVfork;
491
492 FunctionCallee HwasanTagMemoryFunc;
493 FunctionCallee HwasanGenerateTagFunc;
494 FunctionCallee HwasanRecordFrameRecordFunc;
495
496 Constant *ShadowGlobal;
497
498 Value *ShadowBase = nullptr;
499 Value *StackBaseTag = nullptr;
500 Value *CachedFP = nullptr;
501 GlobalValue *ThreadPtrGlobal = nullptr;
502};
503
504} // end anonymous namespace
505
508 // Return early if nosanitize_hwaddress module flag is present for the module.
509 if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
510 return PreservedAnalyses::all();
511 const StackSafetyGlobalInfo *SSI = nullptr;
512 const Triple &TargetTriple = M.getTargetTriple();
513 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
515
516 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
517 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
518 for (Function &F : M)
519 HWASan.sanitizeFunction(F, FAM);
520
522 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
523 // are incrementally updated throughout this pass whenever
524 // SplitBlockAndInsertIfThen is called.
528 // GlobalsAA is considered stateless and does not get invalidated unless
529 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
530 // make changes that require GlobalsAA to be invalidated.
531 PA.abandon<GlobalsAA>();
532 return PA;
533}
535 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
537 OS, MapClassName2PassName);
538 OS << '<';
539 if (Options.CompileKernel)
540 OS << "kernel;";
541 if (Options.Recover)
542 OS << "recover";
543 OS << '>';
544}
545
546void HWAddressSanitizer::createHwasanNote() {
547 // Create a note that contains pointers to the list of global
548 // descriptors. Adding a note to the output file will cause the linker to
549 // create a PT_NOTE program header pointing to the note that we can use to
550 // find the descriptor list starting from the program headers. A function
551 // provided by the runtime initializes the shadow memory for the globals by
552 // accessing the descriptor list via the note. The dynamic loader needs to
553 // call this function whenever a library is loaded.
554 //
555 // The reason why we use a note for this instead of a more conventional
556 // approach of having a global constructor pass a descriptor list pointer to
557 // the runtime is because of an order of initialization problem. With
558 // constructors we can encounter the following problematic scenario:
559 //
560 // 1) library A depends on library B and also interposes one of B's symbols
561 // 2) B's constructors are called before A's (as required for correctness)
562 // 3) during construction, B accesses one of its "own" globals (actually
563 // interposed by A) and triggers a HWASAN failure due to the initialization
564 // for A not having happened yet
565 //
566 // Even without interposition it is possible to run into similar situations in
567 // cases where two libraries mutually depend on each other.
568 //
569 // We only need one note per binary, so put everything for the note in a
570 // comdat. This needs to be a comdat with an .init_array section to prevent
571 // newer versions of lld from discarding the note.
572 //
573 // Create the note even if we aren't instrumenting globals. This ensures that
574 // binaries linked from object files with both instrumented and
575 // non-instrumented globals will end up with a note, even if a comdat from an
576 // object file with non-instrumented globals is selected. The note is harmless
577 // if the runtime doesn't support it, since it will just be ignored.
578 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
579
580 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
581 auto *Start =
582 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
583 nullptr, "__start_hwasan_globals");
584 Start->setVisibility(GlobalValue::HiddenVisibility);
585 auto *Stop =
586 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
587 nullptr, "__stop_hwasan_globals");
588 Stop->setVisibility(GlobalValue::HiddenVisibility);
589
590 // Null-terminated so actually 8 bytes, which are required in order to align
591 // the note properly.
592 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
593
594 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
595 Int32Ty, Int32Ty);
596 auto *Note =
597 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
599 Note->setSection(".note.hwasan.globals");
600 Note->setComdat(NoteComdat);
601 Note->setAlignment(Align(4));
602
603 // The pointers in the note need to be relative so that the note ends up being
604 // placed in rodata, which is the standard location for notes.
605 auto CreateRelPtr = [&](Constant *Ptr) {
609 Int32Ty);
610 };
611 Note->setInitializer(ConstantStruct::getAnon(
612 {ConstantInt::get(Int32Ty, 8), // n_namesz
613 ConstantInt::get(Int32Ty, 8), // n_descsz
614 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
615 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
617
618 // Create a zero-length global in hwasan_globals so that the linker will
619 // always create start and stop symbols.
620 auto *Dummy = new GlobalVariable(
621 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
622 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
623 Dummy->setSection("hwasan_globals");
624 Dummy->setComdat(NoteComdat);
625 Dummy->setMetadata(LLVMContext::MD_associated,
627 appendToCompilerUsed(M, Dummy);
628}
629
630void HWAddressSanitizer::createHwasanCtorComdat() {
631 std::tie(HwasanCtorFunction, std::ignore) =
634 /*InitArgTypes=*/{},
635 /*InitArgs=*/{},
636 // This callback is invoked when the functions are created the first
637 // time. Hook them into the global ctors list in that case:
638 [&](Function *Ctor, FunctionCallee) {
639 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
640 Ctor->setComdat(CtorComdat);
641 appendToGlobalCtors(M, Ctor, 0, Ctor);
642 });
643
644 // Do not create .note.hwasan.globals for static binaries, as it is only
645 // needed for instrumenting globals from dynamic libraries. In static
646 // binaries, the global variables section can be accessed directly via the
647 // __start_hwasan_globals and __stop_hwasan_globals symbols inserted by the
648 // linker.
649 if (!ClStaticLinking)
650 createHwasanNote();
651}
652
653/// Module-level initialization.
654///
655/// inserts a call to __hwasan_init to the module's constructor list.
656void HWAddressSanitizer::initializeModule() {
657 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
658 TargetTriple = M.getTargetTriple();
659
660 // HWASan may do short granule checks on function arguments read from the
661 // argument memory (last byte of the granule), which invalidates writeonly.
662 for (Function &F : M.functions())
663 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/true);
664
665 // x86_64 currently has two modes:
666 // - Intel LAM (default)
667 // - pointer aliasing (heap only)
668 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
669 UsePageAliases = shouldUsePageAliases(TargetTriple);
670 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
671 InstrumentStack = shouldInstrumentStack(TargetTriple);
672 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
673 PointerTagShift = IsX86_64 ? 57 : 56;
674 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
675
676 Mapping.init(TargetTriple, InstrumentWithCalls, CompileKernel);
677
678 C = &(M.getContext());
679 IRBuilder<> IRB(*C);
680
681 HwasanCtorFunction = nullptr;
682
683 // Older versions of Android do not have the required runtime support for
684 // short granules, global or personality function instrumentation. On other
685 // platforms we currently require using the latest version of the runtime.
686 bool NewRuntime =
687 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
688
689 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
690 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
691 TargetTriple.isOSBinFormatELF() &&
692 !optOr(ClInlineAllChecks, Recover);
693
694 // These platforms may prefer less inlining to reduce binary size.
695 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
696 TargetTriple.isOSFuchsia()));
697
698 if (ClMatchAllTag.getNumOccurrences()) {
699 if (ClMatchAllTag != -1) {
700 MatchAllTag = ClMatchAllTag & 0xFF;
701 }
702 } else if (CompileKernel) {
703 MatchAllTag = 0xFF;
704 }
705 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
706
707 // If we don't have personality function support, fall back to landing pads.
708 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
709
710 InstrumentGlobals =
711 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
712
713 if (!CompileKernel) {
714 if (InstrumentGlobals)
715 instrumentGlobals();
716
717 createHwasanCtorComdat();
718
719 bool InstrumentPersonalityFunctions =
720 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
721 if (InstrumentPersonalityFunctions)
722 instrumentPersonalityFunctions();
723 }
724
725 if (!TargetTriple.isAndroid()) {
726 ThreadPtrGlobal = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
727 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
729 "__hwasan_tls", nullptr,
732 return GV;
733 });
734 }
735}
736
737void HWAddressSanitizer::initializeCallbacks(Module &M) {
738 IRBuilder<> IRB(*C);
739 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
740 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
741 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
742 *HwasanMemsetFnTy;
743 if (UseMatchAllCallback) {
744 HwasanMemoryAccessCallbackSizedFnTy =
745 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
746 HwasanMemoryAccessCallbackFnTy =
747 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
748 HwasanMemTransferFnTy =
749 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
750 HwasanMemsetFnTy =
751 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
752 } else {
753 HwasanMemoryAccessCallbackSizedFnTy =
754 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
755 HwasanMemoryAccessCallbackFnTy =
756 FunctionType::get(VoidTy, {IntptrTy}, false);
757 HwasanMemTransferFnTy =
758 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
759 HwasanMemsetFnTy =
760 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
761 }
762
763 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
764 const std::string TypeStr = AccessIsWrite ? "store" : "load";
765 const std::string EndingStr = Recover ? "_noabort" : "";
766
767 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
768 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
769 HwasanMemoryAccessCallbackSizedFnTy);
770
771 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
772 AccessSizeIndex++) {
773 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
774 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
775 itostr(1ULL << AccessSizeIndex) +
776 MatchAllStr + EndingStr,
777 HwasanMemoryAccessCallbackFnTy);
778 }
779 }
780
781 const std::string MemIntrinCallbackPrefix =
782 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
783 ? std::string("")
785
786 HwasanMemmove = M.getOrInsertFunction(
787 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
788 HwasanMemcpy = M.getOrInsertFunction(
789 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
790 HwasanMemset = M.getOrInsertFunction(
791 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
792
793 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
794 PtrTy, Int8Ty, IntptrTy);
795 HwasanGenerateTagFunc =
796 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
797
798 HwasanRecordFrameRecordFunc =
799 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
800
801 ShadowGlobal =
802 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
803
804 HwasanHandleVfork =
805 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
806}
807
808Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
809 // An empty inline asm with input reg == output reg.
810 // An opaque no-op cast, basically.
811 // This prevents code bloat as a result of rematerializing trivial definitions
812 // such as constants or global addresses at every load and store.
813 InlineAsm *Asm =
814 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
815 StringRef(""), StringRef("=r,0"),
816 /*hasSideEffects=*/false);
817 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
818}
819
820Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
821 return getOpaqueNoopCast(IRB, ShadowGlobal);
822}
823
824Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
825 if (Mapping.isFixed()) {
826 return getOpaqueNoopCast(
828 ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
829 }
830
831 if (Mapping.isInIfunc())
832 return getDynamicShadowIfunc(IRB);
833
834 Value *GlobalDynamicAddress =
837 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
838}
839
840bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
841 Value *Ptr) {
842 // Do not instrument accesses from different address spaces; we cannot deal
843 // with them.
844 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
845 if (PtrTy->getPointerAddressSpace() != 0)
846 return true;
847
848 // Ignore swifterror addresses.
849 // swifterror memory addresses are mem2reg promoted by instruction
850 // selection. As such they cannot have regular uses like an instrumentation
851 // function and it makes no sense to track them as memory.
852 if (Ptr->isSwiftError())
853 return true;
854
855 if (findAllocaForValue(Ptr)) {
856 if (!InstrumentStack)
857 return true;
858 if (SSI && SSI->stackAccessIsSafe(*Inst))
859 return true;
860 }
861
862 if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
863 if (!InstrumentGlobals)
864 return true;
865 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
866 }
867
868 return false;
869}
870
871bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
872 Instruction *Inst, Value *Ptr) {
873 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
874 if (Ignored) {
875 ORE.emit(
876 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
877 } else {
878 ORE.emit([&]() {
879 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
880 });
881 }
882 return Ignored;
883}
884
// Collect the memory operands of the instruction that HWASan should check,
// appending one descriptor per interesting operand to Interesting. Respects
// the per-access-kind ClInstrument* flags and skips operands that
// ignoreAccess() deems safe.
void HWAddressSanitizer::getInterestingMemoryOperands(
    const TargetLibraryInfo &TLI,
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata(LLVMContext::MD_nosanitize))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (ShadowBase == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
      return;
    // Atomics are treated as writes; no alignment is recorded for them.
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    // byval arguments are implicitly read (copied) by the call itself.
    for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
      if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
          ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
        continue;
      Type *Ty = CI->getParamByValType(ArgNo);
      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
    }
  }
}
929
  // Return the operand index holding the accessed pointer; only the four
  // instruction kinds instrumented by this pass are expected here.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  // Unreachable: report_fatal_error does not return.
  return -1;
}
942
  // Map a power-of-two access size in bits to its log2-of-bytes index
  // (8 bits -> 0, 16 -> 1, ...), used to select a per-size callback.
  size_t Res = llvm::countr_zero(TypeSize / 8);
  return Res;
}
948
949void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
950 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
951 TargetTriple.isRISCV64())
952 return;
953
954 IRBuilder<> IRB(I);
955 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
956 Value *UntaggedPtr =
957 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
958 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
959}
960
961Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
962 // Mem >> Scale
963 Value *Shadow = IRB.CreateLShr(Mem, Mapping.scale());
964 if (Mapping.isFixed() && Mapping.offset() == 0)
965 return IRB.CreateIntToPtr(Shadow, PtrTy);
966 // (Mem >> Scale) + Offset
967 return IRB.CreatePtrAdd(ShadowBase, Shadow);
968}
969
970int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
971 unsigned AccessSizeIndex) {
972 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
973 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
974 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
975 (Recover << HWASanAccessInfo::RecoverShift) |
976 (IsWrite << HWASanAccessInfo::IsWriteShift) |
977 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
978}
979
// Emit the inline fast-path tag check for \p Ptr: extract the pointer tag,
// load the corresponding shadow tag, and branch to a new (cold) block on
// mismatch. Returns the intermediate values and the mismatch-block
// terminator for reuse by callers.
HWAddressSanitizer::ShadowTagCheckInfo
HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                         DomTreeUpdater &DTU, LoopInfo *LI) {
  ShadowTagCheckInfo R;

  IRBuilder<> IRB(InsertBefore);

  // The pointer tag lives in the bits above PointerTagShift.
  R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  R.PtrTag =
      IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
  R.AddrLong = untagPointer(IRB, R.PtrLong);
  Value *Shadow = memToShadow(R.AddrLong, IRB);
  R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);

  // A pointer carrying the match-all tag always passes the check.
  if (MatchAllTag.has_value()) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // The mismatch path is expected to be cold.
  R.TagMismatchTerm = SplitBlockAndInsertIfThen(
      TagMismatch, InsertBefore, false,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  return R;
}
1007
// Emit an outlined (intrinsic-based) check for the access to \p Ptr; the
// actual check/report sequence is expanded later by the backend. When
// InlineFastPath is set, an inline shadow-tag comparison is emitted first so
// the outlined check only runs on the (unlikely) mismatch path.
void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore,
                                                    DomTreeUpdater &DTU,
                                                    LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  // Redirect the intrinsic onto the slow (mismatch) path.
  if (InlineFastPath)
    InsertBefore =
        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;

  IRBuilder<> IRB(InsertBefore);
  bool UseFixedShadowIntrinsic = false;
  // The memaccess fixed shadow intrinsic is only supported on AArch64,
  // which allows a 16-bit immediate to be left-shifted by 32.
  // Since kShadowBaseAlignment == 32, and Linux by default will not
  // mmap above 48-bits, practically any valid shadow offset is
  // representable.
  // In particular, an offset of 4TB (1024 << 32) is representable, and
  // ought to be good enough for anybody.
  if (TargetTriple.isAArch64() && Mapping.isFixed()) {
    uint16_t OffsetShifted = Mapping.offset() >> 32;
    UseFixedShadowIntrinsic =
        static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
  }

  if (UseFixedShadowIntrinsic) {
    // Shadow base is a compile-time constant baked into the intrinsic.
    IRB.CreateIntrinsic(
        UseShortGranules
            ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
            : Intrinsic::hwasan_check_memaccess_fixedshadow,
        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
         ConstantInt::get(Int64Ty, Mapping.offset())});
  } else {
    // Shadow base is passed as a runtime value.
    IRB.CreateIntrinsic(
        UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
                         : Intrinsic::hwasan_check_memaccess,
        {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
  }
}
1049
// Emit the fully-inline check for the access to \p Ptr: fast-path tag
// compare, short-granule handling for partially-used 16-byte granules, and
// a trapping inline-asm report on genuine mismatch. The asm string encodes
// AccessInfo so the signal handler can decode the fault.
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore,
                                                   DomTreeUpdater &DTU,
                                                   LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);

  // Shadow tags 0..15 denote short granules; anything larger with a tag
  // mismatch is an immediate failure.
  IRBuilder<> IRB(TCI.TagMismatchTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  // Short granule: the shadow byte holds the number of valid bytes in the
  // granule; fail if the access extends past it.
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
                            LI, CheckFailTerm->getParent());

  // For short granules the real tag is stored in the granule's last byte;
  // compare the pointer tag against it.
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
                            LI, CheckFailTerm->getParent());

  // Failure path: trap with a per-target instruction whose immediate
  // encodes AccessInfo; the faulting address is pinned to a fixed register.
  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  case Triple::riscv64:
    // The signal handler will find the data address in x10.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "ebreak\naddiw x0, x11, " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x10}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, TCI.PtrLong);
  // In recover mode, fall back to the original code path after reporting.
  if (Recover)
    cast<BranchInst>(CheckFailTerm)
        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
}
1124
1125bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1126 MemIntrinsic *MI) {
1127 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1128 return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
1129 (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
1130 }
1131 if (isa<MemSetInst>(MI))
1132 return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
1133 return false;
1134}
1135
// Replace a mem intrinsic with a call to the corresponding tag-aware hwasan
// runtime function (__hwasan_memcpy/memmove/memset), forwarding the original
// operands and, when the match-all-tag ABI is in use, the match-all byte.
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
        MI->getOperand(0), MI->getOperand(1),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};

    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
  } else if (isa<MemSetInst>(MI)) {
        MI->getOperand(0),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemset, Args);
  }
  // The runtime call fully replaces the intrinsic.
  MI->eraseFromParent();
}
1157
// Instrument one interesting memory operand. Chooses between the per-size
// callback, the outlined intrinsic check, and the fully inline check for
// power-of-two sized accesses; falls back to the sized callback otherwise.
// Returns false when the check could be elided entirely.
bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
                                             DomTreeUpdater &DTU, LoopInfo *LI,
                                             const DataLayout &DL) {
  Value *Addr = O.getPtr();

  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");

  // If the pointer is statically known to be zero, the tag check will pass
  // since:
  // 1) it has a zero tag
  // 2) the shadow memory corresponding to address 0 is initialized to zero and
  // never updated.
  // We can therefore elide the tag check.
  llvm::KnownBits Known(DL.getPointerTypeSizeInBits(Addr->getType()));
  if (Known.isZero())
    return false;

  // Masked accesses are not handled yet.
  if (O.MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(O.getInsn());
  // Fast path: fixed-size power-of-two access, suitably aligned so it cannot
  // straddle a granule boundary.
  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
       *O.Alignment >= O.TypeStoreSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
    if (InstrumentWithCalls) {
      if (UseMatchAllCallback)
        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                     Args);
    } else if (OutlinedChecks) {
      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                 DTU, LI);
    } else {
      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                DTU, LI);
    }
  } else {
    // Slow path: arbitrary (possibly scalable) size, checked by the sized
    // runtime callback; size is converted from bits to bytes here.
        IRB.CreatePointerCast(Addr, IntptrTy),
        IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
                       ConstantInt::get(IntptrTy, 8))};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
  }
  // Strip the tag from the operand on targets that need it.
  untagPointerOperand(O.getInsn(), Addr);

  return true;
}
1211
// Set the shadow tag for \p Size bytes of alloca \p AI to \p Tag, either via
// the __hwasan_tag_memory runtime call or by writing the shadow directly.
// With short granules enabled, a trailing partially-used granule stores the
// valid-byte count in shadow and the real tag in the granule's last byte.
void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
                                   size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  if (!UseShortGranules)
    Size = AlignedSize;

  Tag = IRB.CreateTrunc(Tag, Int8Ty);
  if (InstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, PtrTy), Tag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.scale();
    Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
    Value *ShadowPtr = memToShadow(AddrLong, IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
    if (Size != AlignedSize) {
      // Short granule: shadow byte holds the number of valid bytes, and the
      // granule's last byte holds the actual tag.
      const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
      IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
                      IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(
          Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
                                      AlignedSize - 1));
    }
  }
}
1245
1246unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1247 if (TargetTriple.getArch() == Triple::x86_64)
1248 return AllocaNo & TagMaskByte;
1249
1250 // A list of 8-bit numbers that have at most one run of non-zero bits.
1251 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1252 // masks.
1253 // The list does not include the value 255, which is used for UAR.
1254 //
1255 // Because we are more likely to use earlier elements of this list than later
1256 // ones, it is sorted in increasing order of probability of collision with a
1257 // mask allocated (temporally) nearby. The program that generated this list
1258 // can be found at:
1259 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1260 static const unsigned FastMasks[] = {
1261 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1262 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1263 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1264 return FastMasks[AllocaNo % std::size(FastMasks)];
1265}
1266
1267Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1268 if (TagMaskByte == 0xFF)
1269 return OldTag; // No need to clear the tag byte.
1270 return IRB.CreateAnd(OldTag,
1271 ConstantInt::get(OldTag->getType(), TagMaskByte));
1272}
1273
1274Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1275 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1276}
1277
// Return the per-function base tag for stack allocas, computed (at most once
// per function, cached in StackBaseTag) from frame-pointer entropy.
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
    return nullptr;
  if (StackBaseTag)
    return StackBaseTag;
  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *FramePointerLong = getCachedFP(IRB);
  Value *StackTag =
      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
                                      IRB.CreateLShr(FramePointerLong, 20)));
  StackTag->setName("hwasan.stack.base.tag");
  return StackTag;
}
1293
// Derive the tag for alloca number \p AllocaNo by xor'ing the per-function
// stack base tag with that alloca's retag mask.
Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        unsigned AllocaNo) {
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(
      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
}
1301
1302Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1303 Value *FramePointerLong = getCachedFP(IRB);
1304 Value *UARTag =
1305 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1306
1307 UARTag->setName("hwasan.uar.tag");
1308 return UARTag;
1309}
1310
1311// Add a tag to an address.
1312Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1313 Value *PtrLong, Value *Tag) {
1314 assert(!UsePageAliases);
1315 Value *TaggedPtrLong;
1316 if (CompileKernel) {
1317 // Kernel addresses have 0xFF in the most significant byte.
1318 Value *ShiftedTag =
1319 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1320 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1321 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1322 } else {
1323 // Userspace can simply do OR (tag << PointerTagShift);
1324 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1325 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1326 }
1327 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1328}
1329
1330// Remove tag from an address.
1331Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1332 assert(!UsePageAliases);
1333 Value *UntaggedPtrLong;
1334 if (CompileKernel) {
1335 // Kernel addresses have 0xFF in the most significant byte.
1336 UntaggedPtrLong =
1337 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1338 TagMaskByte << PointerTagShift));
1339 } else {
1340 // Userspace addresses have 0x00.
1341 UntaggedPtrLong = IRB.CreateAnd(
1342 PtrLong, ConstantInt::get(PtrLong->getType(),
1343 ~(TagMaskByte << PointerTagShift)));
1344 }
1345 return UntaggedPtrLong;
1346}
1347
1348Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1349 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1350 // in Bionic's libc/platform/bionic/tls_defines.h.
1351 constexpr int SanitizerSlot = 6;
1352 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1353 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1354 return ThreadPtrGlobal;
1355}
1356
1357Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1358 if (!CachedFP)
1359 CachedFP = memtag::getFP(IRB);
1360 return CachedFP;
1361}
1362
1363Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1364 // Prepare ring buffer data.
1365 Value *PC = memtag::getPC(TargetTriple, IRB);
1366 Value *FP = getCachedFP(IRB);
1367
1368 // Mix FP and PC.
1369 // Assumptions:
1370 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1371 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1372 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1373 // 0xFFFFPPPPPPPPPPPP
1374 //
1375 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1376 // prefer FP-relative offsets for functions compiled with HWASan.
1377 FP = IRB.CreateShl(FP, 44);
1378 return IRB.CreateOr(PC, FP);
1379}
1380
// Emit the per-function prologue: materialize the shadow base (from TLS, an
// ifunc, or a non-TLS source) and, when requested, record this frame in the
// thread's stack-history ring buffer.
void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.isInTls())
    ShadowBase = getShadowNonTls(IRB);
  else if (!WithFrameRecord && TargetTriple.isAndroid())
    ShadowBase = getDynamicShadowIfunc(IRB);

  // Nothing left to do if no frame record is needed and the base is known.
  if (!WithFrameRecord && ShadowBase)
    return;

  Value *SlotPtr = nullptr;
  Value *ThreadLong = nullptr;
  Value *ThreadLongMaybeUntagged = nullptr;

  // Lazily load the thread-state word from its slot; both the frame-record
  // and shadow-base paths below may need it.
  auto getThreadLongMaybeUntagged = [&]() {
    if (!SlotPtr)
      SlotPtr = getHwasanThreadSlotPtr(IRB);
    if (!ThreadLong)
      ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
    // Extract the address field from ThreadLong. Unnecessary on AArch64 with
    // TBI.
    return TargetTriple.isAArch64() ? ThreadLong
                                    : untagPointer(IRB, ThreadLong);
  };

  if (WithFrameRecord) {
    switch (ClRecordStackHistory) {
    case libcall: {
      // Emit a runtime call into hwasan rather than emitting instructions for
      // recording stack history.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
      break;
    }
    case instr: {
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

      // Store data to ring buffer.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      Value *RecordPtr =
          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
      IRB.CreateStore(FrameRecordInfo, RecordPtr);

      // Advance the ring-buffer cursor held in the thread slot.
      IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
      break;
    }
    case none: {
          "A stack history recording mode should've been selected.");
    }
    }
  }

  if (!ShadowBase) {
    if (!ThreadLongMaybeUntagged)
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

    // Get shadow base address by aligning RecordPtr up.
    // Note: this is not correct if the pointer is already aligned.
    // Runtime library will make sure this never happens.
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(
            ThreadLongMaybeUntagged,
            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
  }
}
1450
// After each landing pad, call __hwasan_handle_vfork with the current stack
// pointer so the runtime can untag frames that were unwound past.
void HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNode());
    IRB.CreateCall(
        HwasanHandleVfork,
            IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
  }
}
1461
// Tag every interesting alloca: rewrite its uses to a tagged address, tag
// its memory for the live range (at lifetime.start or function entry), and
// retag with the UAR tag on every exit so use-after-return is caught.
void HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    // Keep the cast itself and lifetime markers on the untagged alloca.
    AI->replaceUsesWithIf(Replacement, [AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && !isa<LifetimeIntrinsic>(User);
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    bool StandardLifetime =
        !SInfo.CallsReturnTwice &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
                                   &LI, ClMaxLifetimes);
    if (DetectUseAfterScope && StandardLifetime) {
      // Tag at lifetime.start, untag at each reachable exit; if some exit
      // cannot be handled, drop the lifetime.end markers instead.
      IntrinsicInst *Start = Info.LifetimeStart[0];
      IRB.SetInsertPoint(Start->getNextNode());
      tagAlloca(IRB, AI, Tag, Size);
      if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      // Whole-function lifetime: tag once here, untag on every return.
      tagAlloca(IRB, AI, Tag, Size);
      for (auto *RI : SInfo.RetVec)
        TagEnd(RI);
      // We inserted tagging outside of the lifetimes, so we have to remove
      // them.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
}
1538
                       bool Skip) {
  // Emit an optimization remark recording whether selective instrumentation
  // skipped this function ("Skip") or it was sanitized ("Sanitize").
  if (Skip) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
             << "Skipped: F=" << ore::NV("Function", &F);
    });
  } else {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
             << "Sanitized: F=" << ore::NV("Function", &F);
    });
  }
}
1553
// Decide whether selective instrumentation should leave this function
// uninstrumented, either because profile data marks it hot (above the
// configured percentile) or because random sampling dropped it.
bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
  auto SkipHot = [&]() {
    // Only active when a hot-percentile cutoff was given on the command line.
    if (!ClHotPercentileCutoff.getNumOccurrences())
      return false;
    ProfileSummaryInfo *PSI =
        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
    if (!PSI || !PSI->hasProfileSummary()) {
      // No profile available: count it and keep instrumenting.
      ++NumNoProfileSummaryFuncs;
      return false;
    }
    return PSI->isFunctionHotInCallGraphNthPercentile(
  };

  auto SkipRandom = [&]() {
    // Only active when a random keep rate was given on the command line.
    if (!ClRandomKeepRate.getNumOccurrences())
      return false;
    std::bernoulli_distribution D(ClRandomKeepRate);
    // Keep with probability ClRandomKeepRate, i.e. skip on failure.
    return !D(*Rng);
  };

  // The random draw happens first, so the RNG stream is consumed
  // consistently regardless of profile availability.
  bool Skip = SkipRandom() || SkipHot();
  return Skip;
}
1581
// Top-level per-function driver: collect interesting accesses, allocas,
// landing pads and mem intrinsics, emit the prologue (shadow base / frame
// record), then instrument everything found. Per-function cached state
// (ShadowBase, StackBaseTag, CachedFP) is reset at the end.
void HWAddressSanitizer::sanitizeFunction(Function &F,
  if (&F == HwasanCtorFunction)
    return;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return;

  if (F.empty())
    return;

  if (F.isPresplitCoroutine())
    return;

  NumTotalFuncs++;

  if (selectiveInstrumentationShouldSkip(F, FAM))
    return;

  NumInstrumentedFuncs++;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> LandingPadVec;

  // Single scan over the function gathering all instrumentation targets.
  for (auto &Inst : instructions(F)) {
    if (InstrumentStack) {
      SIB.visit(ORE, Inst);
    }

    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
      LandingPadVec.push_back(&Inst);

    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
      if (!ignoreMemIntrinsic(ORE, MI))
        IntrinToInstrument.push_back(MI);
  }

  memtag::StackInfo &SInfo = SIB.get();

  initializeCallbacks(*F.getParent());

  if (!LandingPadVec.empty())
    instrumentLandingPads(LandingPadVec);

  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
  }

  // Nothing to instrument in this function.
  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return;

  assert(!ShadowBase);

  // The prologue goes at the very start of the entry block.
  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory != none &&
                   Mapping.withFrameRecord() &&
                   !SInfo.AllocasToInstrument.empty());

  if (!SInfo.AllocasToInstrument.empty()) {
    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
    Value *StackTag = getStackBaseTag(EntryIRB);
    Value *UARTag = getUARTag(EntryIRB);
    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = F.getEntryBlock().begin();
    for (Instruction &I :
         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
      if (auto *AI = dyn_cast<AllocaInst>(&I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I.moveBefore(F.getEntryBlock(), InsertPt);
    }
  }

  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  const DataLayout &DL = F.getDataLayout();
  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand, DTU, LI, DL);
  DTU.flush();

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto *Inst : IntrinToInstrument)
      instrumentMemIntrinsic(Inst);
  }

  // Reset per-function cached values before the next function.
  ShadowBase = nullptr;
  StackBaseTag = nullptr;
  CachedFP = nullptr;
}
1700
// Replace global \p GV with a tagged clone: pad the initializer to granule
// size (embedding the short-granule byte), emit "hwasan_globals" descriptors
// the runtime uses to tag its shadow, and alias the original name to the
// clone's address with \p Tag in the top byte.
void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  assert(!UsePageAliases);
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short granule
  // tags would discriminate two globals with different tags, but there would
  // otherwise be nothing stopping such a global from being incorrectly ICF'd
  // with an uninstrumented (i.e. tag 0) global that happened to have the short
  // granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  // bytes 0-3: relative address of global
  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
  // it isn't, we create multiple descriptors)
  // byte 7: tag
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    auto *GVRelPtr = ConstantExpr::getTrunc(
            ConstantExpr::getPtrToInt(NewGV, Int64Ty),
            ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
          ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    Descriptor->setMetadata(LLVMContext::MD_associated,
    appendToCompilerUsed(M, Descriptor);
  }

  // Redirect all uses of the original global to a tagged alias of the clone.
      ConstantExpr::getPtrToInt(NewGV, Int64Ty),
      ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}
1777
// Select the module's instrumentable globals and tag each one with a
// sequential tag seeded from an MD5 of the source file name (for
// reproducible, per-TU-varied tags).
void HWAddressSanitizer::instrumentGlobals() {
  std::vector<GlobalVariable *> Globals;
  for (GlobalVariable &GV : M.globals()) {
      continue;

    if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
        GV.isThreadLocal())
      continue;

    // Common symbols can't have aliases point to them, so they can't be tagged.
    if (GV.hasCommonLinkage())
      continue;

    if (ClAllGlobals) {
      // Avoid instrumenting intrinsic global variables.
      if (GV.getSection() == "llvm.metadata")
        continue;
    } else {
      // Globals with custom sections may be used in __start_/__stop_
      // enumeration, which would be broken both by adding tags and potentially
      // by the extra padding/alignment that we insert.
      if (GV.hasSection())
        continue;
    }

    Globals.push_back(&GV);
  }

  // Seed the tag sequence deterministically from the source file name.
  MD5 Hasher;
  Hasher.update(M.getSourceFileName());
  MD5::MD5Result Hash;
  Hasher.final(Hash);
  uint8_t Tag = Hash[0];

  assert(TagMaskByte >= 16);

  for (GlobalVariable *GV : Globals) {
    // Don't allow globals to be tagged with something that looks like a
    // short-granule tag, otherwise we lose inter-granule overflow detection, as
    // the fast path shadow-vs-address check succeeds.
    if (Tag < 16 || Tag > TagMaskByte)
      Tag = 16;
    instrumentGlobal(GV, Tag++);
  }
}
1824
1825void HWAddressSanitizer::instrumentPersonalityFunctions() {
1826 // We need to untag stack frames as we unwind past them. That is the job of
1827 // the personality function wrapper, which either wraps an existing
1828 // personality function or acts as a personality function on its own. Each
1829 // function that has a personality function or that can be unwound past has
1830 // its personality function changed to a thunk that calls the personality
1831 // function wrapper in the runtime.
1833 for (Function &F : M) {
1834 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1835 continue;
1836
1837 if (F.hasPersonalityFn()) {
1838 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1839 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1840 PersonalityFns[nullptr].push_back(&F);
1841 }
1842 }
1843
1844 if (PersonalityFns.empty())
1845 return;
1846
1847 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1848 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1849 PtrTy, PtrTy, PtrTy, PtrTy);
1850 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1851 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1852
1853 for (auto &P : PersonalityFns) {
1854 std::string ThunkName = kHwasanPersonalityThunkName;
1855 if (P.first)
1856 ThunkName += ("." + P.first->getName()).str();
1857 FunctionType *ThunkFnTy = FunctionType::get(
1858 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1859 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1860 cast<GlobalValue>(P.first)->hasLocalLinkage());
1861 auto *ThunkFn = Function::Create(ThunkFnTy,
1864 ThunkName, &M);
1865 // TODO: think about other attributes as well.
1866 if (any_of(P.second, [](const Function *F) {
1867 return F->hasFnAttribute("branch-target-enforcement");
1868 })) {
1869 ThunkFn->addFnAttr("branch-target-enforcement");
1870 }
1871 if (!IsLocal) {
1872 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1873 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1874 }
1875
1876 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1877 IRBuilder<> IRB(BB);
1878 CallInst *WrapperCall = IRB.CreateCall(
1879 HwasanPersonalityWrapper,
1880 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1881 ThunkFn->getArg(3), ThunkFn->getArg(4),
1882 P.first ? P.first : Constant::getNullValue(PtrTy),
1883 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1884 WrapperCall->setTailCall();
1885 IRB.CreateRet(WrapperCall);
1886
1887 for (Function *F : P.second)
1888 F->setPersonalityFn(ThunkFn);
1889 }
1890}
1891
1892void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1893 bool InstrumentWithCalls,
1894 bool CompileKernel) {
1895 // Start with defaults.
1896 Scale = kDefaultShadowScale;
1897 Kind = OffsetKind::kTls;
1898 WithFrameRecord = true;
1899
1900 // Tune for the target.
1901 if (TargetTriple.isOSFuchsia()) {
1902 // Fuchsia is always PIE, which means that the beginning of the address
1903 // space is always available.
1904 SetFixed(0);
1905 } else if (CompileKernel || InstrumentWithCalls) {
1906 SetFixed(0);
1907 WithFrameRecord = false;
1908 }
1909
1910 WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);
1911
1912 // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
1913 Kind = optOr(ClMappingOffsetDynamic, Kind);
1914 if (ClMappingOffset.getNumOccurrences() > 0 &&
1915 !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
1916 ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
1917 SetFixed(ClMappingOffset);
1918 }
1919}
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
static cl::opt< size_t > ClMaxLifetimes("stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:687
#define clEnumVal(ENUMVAL, DESC)
Definition: CommandLine.h:685
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1328
bool End
Definition: ELF_riscv.cpp:480
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static const size_t kDefaultShadowScale
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden)
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< bool > ClFrameRecords("hwasan-with-frame-record", cl::desc("Use ring buffer for stack allocations"), cl::Hidden)
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< float > ClRandomKeepRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function. " "Note: instrumentation can be skipped randomly " "OR because of the hot percentile cutoff, if " "both are supplied."))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< OffsetKind > ClMappingOffsetDynamic("hwasan-mapping-offset-dynamic", cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden, cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"), clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"), clEnumValN(OffsetKind::kTls, "tls", "Use TLS")))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
static cl::opt< bool > ClAllGlobals("hwasan-all-globals", cl::desc("Instrument globals, even those within user-defined sections. Warning: " "This may break existing code which walks globals via linker-generated " "symbols, expects certain globals to be contiguous with each other, or " "makes other assumptions which are invalidated by HWASan " "instrumentation."), cl::Hidden, cl::init(false))
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClStaticLinking("hwasan-static-linking", cl::desc("Don't use .note.hwasan.globals section to instrument globals " "from loadable libraries. " "Note: in static binaries, the global variables section can be " "accessed directly via linker-provided " "__start_hwasan_globals and __stop_hwasan_globals symbols"), cl::Hidden, cl::init(false))
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cutoff."))
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
an instruction to allocate memory on the stack
Definition: Instructions.h:64
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:101
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:431
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:506
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition: Constants.h:715
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2314
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2654
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2300
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2647
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2272
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition: Constants.h:486
This is an important base class in LLVM.
Definition: Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:284
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:166
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:585
StringRef getSection() const
Get the custom section of this global if it has one.
Definition: GlobalObject.h:114
LLVM_ABI void setComdat(Comdat *C)
Definition: Globals.cpp:214
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:106
LLVM_ABI const SanitizerMetadata & getSanitizerMetadata() const
Definition: Globals.cpp:245
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:265
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:250
LinkageTypes getLinkage() const
Definition: GlobalValue.h:548
bool isDeclarationForLinker() const
Definition: GlobalValue.h:625
bool hasSanitizerMetadata() const
Definition: GlobalValue.h:357
unsigned getAddressSpace() const
Definition: GlobalValue.h:207
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:663
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:296
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:69
bool hasCommonLinkage() const
Definition: GlobalValue.h:534
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:53
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:56
Type * getValueType() const
Definition: GlobalValue.h:298
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:507
Analysis pass providing a never-invalidated alias analysis result.
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1936
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2251
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2199
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1513
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2036
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1172
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:201
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1454
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2333
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:834
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2337
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1847
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1492
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:630
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2082
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1551
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1860
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1403
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2508
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2068
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:605
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:130
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2341
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2277
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:207
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1532
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1599
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition: IRBuilder.h:1573
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2780
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:585
LLVM_ABI void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
An instruction for reading from memory.
Definition: Instructions.h:180
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:570
Definition: MD5.h:42
LLVM_ABI void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition: MD5.cpp:189
LLVM_ABI void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition: MD5.cpp:234
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:48
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1565
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool empty() const
Definition: MapVector.h:75
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
GlobalVariable * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition: Module.cpp:256
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:716
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:171
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition: Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool empty() const
Definition: SmallVector.h:82
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
Definition: Instructions.h:296
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:233
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:269
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:414
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:818
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:816
@ aarch64_be
Definition: Triple.h:55
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:408
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition: Triple.h:1075
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:995
bool isOSFuchsia() const
Definition: Triple.h:639
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:766
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:502
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
LLVM_ABI void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:554
bool hasName() const
Definition: Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
int getNumOccurrences() const
Definition: CommandLine.h:400
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition: ELF.h:1792
@ ReallyHidden
Definition: CommandLine.h:139
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:712
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
Value * getFP(IRBuilder<> &IRB)
bool isStandardLifetime(const SmallVectorImpl< IntrinsicInst * > &LifetimeStart, const SmallVectorImpl< IntrinsicInst * > &LifetimeEnd, const DominatorTree *DT, const LoopInfo *LI, size_t MaxLifetimes)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const Instruction *Start, const SmallVectorImpl< IntrinsicInst * > &Ends, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
Value * incrementThreadLong(IRBuilder<> &IRB, Value *ThreadLong, unsigned int Inc)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:663
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:157
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
LLVM_ABI std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:3829
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:70
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 8 > RetVec