//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = kDynamicShadowSentinel;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kWebAssemblyShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
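// For illustration: with these shifts and masks, a 4-byte userspace write
// packs as
//   Packed = (IsWrite << kIsWriteShift) | (AccessSizeIndex << kAccessSizeIndexShift)
//          = (1 << 5) | (2 << 1) = 36
// (AccessSizeIndex = log2(4) = 2, CompileKernel = 0); decoding applies the
// same shifts together with the corresponding masks.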

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::Hidden, cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"), cl::Hidden,
                                        cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

324 "asan-realign-stack",
325 cl::desc("Realign stack to the value of this flag (power of two)"),
326 cl::Hidden, cl::init(32));
327
329 "asan-instrumentation-with-call-threshold",
330 cl::desc("If the function being instrumented contains more than "
331 "this number of memory accesses, use callbacks instead of "
332 "inline checks (-1 means never use callbacks)."),
333 cl::Hidden, cl::init(7000));
334
336 "asan-memory-access-callback-prefix",
337 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
338 cl::init("__asan_"));
339
341 "asan-kernel-mem-intrinsic-prefix",
342 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
343 cl::init(false));
344
345static cl::opt<bool>
346 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
347 cl::desc("instrument dynamic allocas"),
348 cl::Hidden, cl::init(true));
349
351 "asan-skip-promotable-allocas",
352 cl::desc("Do not instrument promotable allocas"), cl::Hidden,
353 cl::init(true));
354
356 "asan-constructor-kind",
357 cl::desc("Sets the ASan constructor kind"),
358 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
360 "Use global constructors")),
362// These flags allow to change the shadow mapping.
363// The shadow mapping looks like
364// Shadow = (Mem >> scale) + offset
365
366static cl::opt<int> ClMappingScale("asan-mapping-scale",
367 cl::desc("scale of asan shadow mapping"),
368 cl::Hidden, cl::init(0));
369
371 ClMappingOffset("asan-mapping-offset",
372 cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
373 cl::Hidden, cl::init(0));
374
375// Optimization flags. Not user visible, used mostly for testing
376// and benchmarking the tool.
377
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
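///
/// A worked example (illustrative values only): with the x86-64 Linux
/// userspace defaults computed below in getShadowMapping (Scale = 3,
/// Offset = 0x7fff8000), address 0x7f0000001234 maps to the shadow byte at
///   (0x7f0000001234 >> 3) + 0x7fff8000 = 0xfe000000246 + 0x7fff8000
///                                      = 0xfe07fff8246.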
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.isABIN32();
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsAMDGPU = TargetTriple.isAMDGPU();
  bool IsHaiku = TargetTriple.isOSHaiku();
  bool IsWasm = TargetTriple.isWasm();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsWasm)
      Mapping.Offset = kWebAssemblyShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsHaiku && IsX86_64)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing in the shadow offset is more efficient (at least on x86) if the
  // offset is a power of two, but on ppc64 and loongarch64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  Mapping.InGlobal = ClWithIfunc && IsAndroid && IsArmOrThumb;

  return Mapping;
}

namespace llvm {
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem) {
  // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
  //
  // This is true not only for sanitized functions: AttrInfer can infer those
  // attributes on libc functions, and the inference stops being valid once
  // those functions are instrumented (Android) or intercepted.
  //
  // We might want to model ASan shadow memory more opaquely to get rid of
  // this problem altogether, by hiding the shadow memory write in an
  // intrinsic, essentially like in the AArch64StackTagging pass. But that's
  // for another day.

  // The API is weird. `onlyReadsMemory` actually means "does not write", and
  // `onlyWritesMemory` actually means "does not read". So we reconstruct
  // "accesses memory" && "does not read" <=> "writes".
  bool Changed = false;
  if (!F.doesNotAccessMemory()) {
    bool WritesMemory = !F.onlyReadsMemory();
    bool ReadsMemory = !F.onlyWritesMemory();
    if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
      F.removeFnAttr(Attribute::Memory);
      Changed = true;
    }
  }
  if (ReadsArgMem) {
    for (Argument &A : F.args()) {
      if (A.hasAttribute(Attribute::WriteOnly)) {
        A.removeAttr(Attribute::WriteOnly);
        Changed = true;
      }
    }
  }
  if (Changed) {
    // nobuiltin makes sure later passes don't restore assumptions about
    // the function.
    F.addFnAttr(Attribute::NoBuiltin);
  }
}

} // namespace llvm

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

static Twine genName(StringRef suffix) {
  return Twine(kAsanGenPrefix) + suffix;
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();

      if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle.
        OperandBundleDef OB("funclet", &*EHPadIt);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};
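
// Typical usage (a sketch): instrumentation code creates one
// RuntimeCallInserter per function and routes every inserted runtime call
// through it, so the destructor can attach the "funclet" bundle where needed:
//   RuntimeCallInserter RTCI(F);
//   IRBuilder<> IRB(InsertPt);
//   RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc);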

/// AddressSanitizer: instrument the code in a module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = M.getTargetTriple();

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
      const TargetTransformInfo *TTI);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
                          const TargetTransformInfo *TTI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  Module &M;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = M.getTargetTriple();
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule();

private:
  void initializeCallbacks();

  void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor();

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit);
  void createInitializerPoisonCalls();
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion() const;
  GlobalVariable *getOrCreateModuleName();

  Module &M;
  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
  GlobalVariable *ModuleName = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores the location and arguments of a poisoning/unpoisoning call for an
  // alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    RetVec.push_back(&CRI);
  }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset intrinsic
    // for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Value *DynamicAreaOffset = IRB.CreateIntrinsic(
          Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace all
  // its uses with the new address, so
  //     addr = alloca type, old_size, align
  // is replaced by
  //     new_size = (old_size + additional_size) * sizeof(type)
  //     tmp = alloca i8, new_size, max(align, 32)
  //     addr = tmp + 32 (first 32 bytes are for the left redzone).
  // additional_size is added so the new allocation contains not only the
  // requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Find the alloca instruction that corresponds to the llvm.lifetime
    // argument.
    AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
    // We're interested only in allocas we can handle.
    if (!AI || !ASan.isInterestingAlloca(*AI))
      return;

    std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
    // Check that size is known and can be stored in IntptrTy.
    // TODO: Add support for scalable vectors if possible.
    if (!Size || Size->isScalable() ||
        !ConstantInt::isValueValidForType(IntptrTy, Size->getFixedValue()))
      return;

    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.UseAfterScope)
    OS << "use-after-scope";
  OS << '>';
}

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if the nosanitize_address module flag is present: it implies
  // that the asan pass has already run on this module.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    if (F.empty())
      continue;
    if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
      continue;
    if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
      continue;
    if (F.getName().starts_with("__asan_"))
      continue;
    if (F.isPresplitCoroutine())
      continue;
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
  }
  Modified |= ModuleSanitizer.instrumentModule();
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough.
  // Sanitizers make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
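// For example, a 32-bit access maps to countr_zero(32 / 8) = countr_zero(4)
// = 2, i.e. the third of the kNumberOfAccessSizes slots (1, 2, 4, 8, 16
// bytes).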

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
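
// For the default x86-64 Linux userspace mapping (Scale = 3,
// Offset = 0x7fff8000, OrShadowOffset = false), this emits IR equivalent to:
//   %0 = lshr i64 %addr, 3
//   %1 = add i64 %0, 2147450880  ; 0x7fff8000
// (Illustrative; the constant and the add-vs-or choice vary by target.)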

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);

  if (!Inserted)
    return It->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  It->second = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

1456void AddressSanitizer::getInterestingMemoryOperands(
1458 const TargetTransformInfo *TTI) {
1459 // Do not instrument the load fetching the dynamic shadow address.
1460 if (LocalDynamicShadow == I)
1461 return;
1462
1463 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1464 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
1465 return;
1466 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
1467 LI->getType(), LI->getAlign());
1468 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1469 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
1470 return;
1471 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
1472 SI->getValueOperand()->getType(), SI->getAlign());
1473 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
1474 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
1475 return;
1476 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
1477 RMW->getValOperand()->getType(), std::nullopt);
1478 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
1479 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
1480 return;
1481 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
1482 XCHG->getCompareOperand()->getType(),
1483 std::nullopt);
1484 } else if (auto CI = dyn_cast<CallInst>(I)) {
1485 switch (CI->getIntrinsicID()) {
1486 case Intrinsic::masked_load:
1487 case Intrinsic::masked_store:
1488 case Intrinsic::masked_gather:
1489 case Intrinsic::masked_scatter: {
1490 bool IsWrite = CI->getType()->isVoidTy();
1491 // Masked store has an initial operand for the value.
1492 unsigned OpOffset = IsWrite ? 1 : 0;
1493 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1494 return;
1495
1496 auto BasePtr = CI->getOperand(OpOffset);
1497 if (ignoreAccess(I, BasePtr))
1498 return;
1499 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1500 MaybeAlign Alignment = Align(1);
1501 // Otherwise no alignment guarantees. We probably got Undef.
1502 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
1503 Alignment = Op->getMaybeAlignValue();
1504 Value *Mask = CI->getOperand(2 + OpOffset);
1505 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
1506 break;
1507 }
1508 case Intrinsic::masked_expandload:
1509 case Intrinsic::masked_compressstore: {
1510 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1511 unsigned OpOffset = IsWrite ? 1 : 0;
1512 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1513 return;
1514 auto BasePtr = CI->getOperand(OpOffset);
1515 if (ignoreAccess(I, BasePtr))
1516 return;
1517 MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
1518 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1519
1520 IRBuilder IB(I);
1521 Value *Mask = CI->getOperand(1 + OpOffset);
1522 // Use the popcount of Mask as the effective vector length.
1523 Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
1524 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
1525 Value *EVL = IB.CreateAddReduce(ExtMask);
1526 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
1527 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1528 EVL);
1529 break;
1530 }
1531 case Intrinsic::vp_load:
1532 case Intrinsic::vp_store:
1533 case Intrinsic::experimental_vp_strided_load:
1534 case Intrinsic::experimental_vp_strided_store: {
1535 auto *VPI = cast<VPIntrinsic>(CI);
1536 unsigned IID = CI->getIntrinsicID();
1537 bool IsWrite = CI->getType()->isVoidTy();
1538 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1539 return;
1540 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1541 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1542 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
1543 Value *Stride = nullptr;
1544 if (IID == Intrinsic::experimental_vp_strided_store ||
1545 IID == Intrinsic::experimental_vp_strided_load) {
1546 Stride = VPI->getOperand(PtrOpNo + 1);
1547 // Use the pointer alignment as the element alignment if the stride is a
1548 // mutiple of the pointer alignment. Otherwise, the element alignment
1549 // should be Align(1).
1550 unsigned PointerAlign = Alignment.valueOrOne().value();
1551 if (!isa<ConstantInt>(Stride) ||
1552 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1553 Alignment = Align(1);
1554 }
1555 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1556 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1557 Stride);
1558 break;
1559 }
1560 case Intrinsic::vp_gather:
1561 case Intrinsic::vp_scatter: {
1562 auto *VPI = cast<VPIntrinsic>(CI);
1563 unsigned IID = CI->getIntrinsicID();
1564 bool IsWrite = IID == Intrinsic::vp_scatter;
1565 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1566 return;
1567 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1568 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1569 MaybeAlign Alignment = VPI->getPointerAlignment();
1570 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1571 VPI->getMaskParam(),
1572 VPI->getVectorLengthParam());
1573 break;
1574 }
1575 default:
1576 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1577 MemIntrinsicInfo IntrInfo;
1578 if (TTI->getTgtMemIntrinsic(II, IntrInfo))
1579 Interesting = IntrInfo.InterestingOperands;
1580 return;
1581 }
1582 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1583 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
1584 ignoreAccess(I, CI->getArgOperand(ArgNo)))
1585 continue;
1586 Type *Ty = CI->getParamByValType(ArgNo);
1587 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
1588 }
1589 }
1590 }
1591}
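// Editor's note (illustrative): in the default case above, a byval argument
// is a caller-side copy, so the call itself reads the whole object. A call
// like
//   call void @f(ptr byval(%struct.S) align 4 %p)
// is recorded as a read of the full %struct.S at %p (IsWrite = false) when
// ClInstrumentByval is enabled.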
1592
1593static bool isPointerOperand(Value *V) {
1594 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1595}
1596
1597// This is a rough heuristic; it may cause both false positives and
1598// false negatives. The proper implementation requires cooperation with
1599// the frontend.
1600static bool isInterestingPointerComparison(Instruction *I) {
1601 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1602 if (!Cmp->isRelational())
1603 return false;
1604 } else {
1605 return false;
1606 }
1607 return isPointerOperand(I->getOperand(0)) &&
1608 isPointerOperand(I->getOperand(1));
1609}
1610
1611// This is a rough heuristic; it may cause both false positives and
1612// false negatives. The proper implementation requires cooperation with
1613// the frontend.
1614static bool isInterestingPointerSubtraction(Instruction *I) {
1615 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1616 if (BO->getOpcode() != Instruction::Sub)
1617 return false;
1618 } else {
1619 return false;
1620 }
1621 return isPointerOperand(I->getOperand(0)) &&
1622 isPointerOperand(I->getOperand(1));
1623}
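// Editor's note (illustrative): the two heuristics above match IR such as
//   %cmp = icmp ult ptr %p, %q   ; relational pointer comparison
//   %d   = sub i64 %pi, %qi      ; both operands derived from pointers
// Equality comparisons are not matched, since == and != on pointers to
// different objects are well-defined, unlike relational comparison and
// subtraction.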
1624
1625bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1626 // If a global variable does not have dynamic initialization we don't
1627 // have to instrument it. However, if a global has no initializer at all,
1628 // we assume its dynamic initializer lives in another TU.
1629 if (!G->hasInitializer())
1630 return false;
1631
1632 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1633 return false;
1634
1635 return true;
1636}
1637
1638void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1639 Instruction *I, RuntimeCallInserter &RTCI) {
1640 IRBuilder<> IRB(I);
1641 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1642 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1643 for (Value *&i : Param) {
1644 if (i->getType()->isPointerTy())
1645 i = IRB.CreatePointerCast(i, IntptrTy);
1646 }
1647 RTCI.createRuntimeCall(IRB, F, Param);
1648}
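// Editor's note (illustrative sketch): for the icmp example above, this
// emits something like
//   %p.i = ptrtoint ptr %p to i64
//   %q.i = ptrtoint ptr %q to i64
//   call void @__sanitizer_ptr_cmp(i64 %p.i, i64 %q.i)
// assuming AsanPtrCmpFunction resolves to __sanitizer_ptr_cmp; the runtime
// can then diagnose operands that point into different memory regions.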
1649
1650static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1651 Instruction *InsertBefore, Value *Addr,
1652 MaybeAlign Alignment, unsigned Granularity,
1653 TypeSize TypeStoreSize, bool IsWrite,
1654 Value *SizeArgument, bool UseCalls,
1655 uint32_t Exp, RuntimeCallInserter &RTCI) {
1656 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1657 // if the data is properly aligned.
1658 if (!TypeStoreSize.isScalable()) {
1659 const auto FixedSize = TypeStoreSize.getFixedValue();
1660 switch (FixedSize) {
1661 case 8:
1662 case 16:
1663 case 32:
1664 case 64:
1665 case 128:
1666 if (!Alignment || *Alignment >= Granularity ||
1667 *Alignment >= FixedSize / 8)
1668 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1669 FixedSize, IsWrite, nullptr, UseCalls,
1670 Exp, RTCI);
1671 }
1672 }
1673 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1674 IsWrite, nullptr, UseCalls, Exp, RTCI);
1675}
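// Editor's note (worked example, not in the source): with the default shadow
// granularity of 8, a 4-byte access with 4-byte alignment cannot straddle two
// shadow granules, so it takes the single-check path above (FixedSize == 32,
// *Alignment >= FixedSize / 8). A 16-byte access with only 2-byte alignment,
// or any scalable-vector access, falls through to
// instrumentUnusualSizeOrAlignment, which checks the first and last byte.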
1676
1677void AddressSanitizer::instrumentMaskedLoadOrStore(
1678 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1679 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1680 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1681 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1682 RuntimeCallInserter &RTCI) {
1683 auto *VTy = cast<VectorType>(OpType);
1684 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1685 auto Zero = ConstantInt::get(IntptrTy, 0);
1686
1687 IRBuilder IB(I);
1688 Instruction *LoopInsertBefore = I;
1689 if (EVL) {
1690 // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1691 // greater than zero, so we must check whether EVL is zero here.
1692 Type *EVLType = EVL->getType();
1693 Value *IsEVLNonZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1694 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLNonZero, I, false);
1695 IB.SetInsertPoint(LoopInsertBefore);
1696 // Cast EVL to IntptrTy.
1697 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1698 // To avoid undefined behavior when extracting with an out-of-range index,
1699 // use the minimum of EVL and the element count as the trip count.
1700 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1701 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1702 } else {
1703 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1704 }
1705
1706 // Cast Stride to IntptrTy.
1707 if (Stride)
1708 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1709
1710 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
1711 [&](IRBuilderBase &IRB, Value *Index) {
1712 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1713 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1714 if (MaskElemC->isZero())
1715 // No check
1716 return;
1717 // Unconditional check
1718 } else {
1719 // Conditional check
1720 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1721 MaskElem, &*IRB.GetInsertPoint(), false);
1722 IRB.SetInsertPoint(ThenTerm);
1723 }
1724
1725 Value *InstrumentedAddress;
1726 if (isa<VectorType>(Addr->getType())) {
1727 assert(
1728 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1729 "Expected vector of pointer.");
1730 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1731 } else if (Stride) {
1732 Index = IRB.CreateMul(Index, Stride);
1733 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1734 } else {
1735 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1736 }
1737 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1738 Alignment, Granularity, ElemTypeSize, IsWrite,
1739 SizeArgument, UseCalls, Exp, RTCI);
1740 });
1741}
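// Editor's note (illustrative): for a masked store of <4 x float>, the loop
// built above conceptually expands to
//   for (Index = 0; Index < EVL; ++Index)
//     if (extractelement(Mask, Index))
//       check(Addr + Index * sizeof(float));   // pseudo-code
// except that lanes with a constant mask bit skip the branch: constant-true
// lanes are checked unconditionally, constant-false lanes are not checked.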
1742
1743void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1744 InterestingMemoryOperand &O, bool UseCalls,
1745 const DataLayout &DL,
1746 RuntimeCallInserter &RTCI) {
1747 Value *Addr = O.getPtr();
1748
1749 // Optimization experiments.
1750 // The experiments can be used to evaluate potential optimizations that remove
1751 // instrumentation (assess false negatives). Instead of completely removing
1752 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1753 // experiments that want to remove instrumentation of this instruction).
1754 // If Exp is non-zero, this pass will emit special calls into the runtime
1755 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1756 // make the runtime terminate the program in a special way (with a different
1757 // exit status). Then you run the new compiler on a buggy corpus, collect
1758 // the special terminations (ideally, you don't see them at all -- no false
1759 // negatives) and make the decision on the optimization.
1760 uint32_t Exp = ClForceExperiment;
1761
1762 if (ClOpt && ClOptGlobals) {
1763 // If initialization order checking is disabled, a simple access to a
1764 // dynamically initialized global is always valid.
1765 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1766 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1767 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1768 NumOptimizedAccessesToGlobalVar++;
1769 return;
1770 }
1771 }
1772
1773 if (ClOpt && ClOptStack) {
1774 // A direct inbounds access to a stack variable is always valid.
1775 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1776 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1777 NumOptimizedAccessesToStackVar++;
1778 return;
1779 }
1780 }
1781
1782 if (O.IsWrite)
1783 NumInstrumentedWrites++;
1784 else
1785 NumInstrumentedReads++;
1786
1787 if (O.MaybeByteOffset) {
1788 Type *Ty = Type::getInt8Ty(*C);
1789 IRBuilder IB(O.getInsn());
1790
1791 Value *OffsetOp = O.MaybeByteOffset;
1792 if (TargetTriple.isRISCV()) {
1793 Type *OffsetTy = OffsetOp->getType();
1794 // RVV indexed loads/stores zero-extend offset operands that are narrower
1795 // than XLEN to XLEN.
1796 if (OffsetTy->getScalarType()->getIntegerBitWidth() <
1797 static_cast<unsigned>(LongSize)) {
1798 VectorType *OrigType = cast<VectorType>(OffsetTy);
1799 Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
1800 OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
1801 }
1802 }
1803 Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
1804 }
1805
1806 unsigned Granularity = 1 << Mapping.Scale;
1807 if (O.MaybeMask) {
1808 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1809 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1810 Granularity, O.OpType, O.IsWrite, nullptr,
1811 UseCalls, Exp, RTCI);
1812 } else {
1813 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1814 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1815 UseCalls, Exp, RTCI);
1816 }
1817}
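// Editor's note (illustrative): the RISC-V widening above mirrors the RVV
// hardware behavior for indexed accesses. E.g. a <vscale x 2 x i16> offset
// vector is zero-extended to <vscale x 2 x i64> on a 64-bit target before
// the vector GEP, so the shadow checks see the same effective addresses as
// the eventual indexed load or store.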
1818
1819Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1820 Value *Addr, bool IsWrite,
1821 size_t AccessSizeIndex,
1822 Value *SizeArgument,
1823 uint32_t Exp,
1824 RuntimeCallInserter &RTCI) {
1825 InstrumentationIRBuilder IRB(InsertBefore);
1826 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1827 CallInst *Call = nullptr;
1828 if (SizeArgument) {
1829 if (Exp == 0)
1830 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1831 {Addr, SizeArgument});
1832 else
1833 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1834 {Addr, SizeArgument, ExpVal});
1835 } else {
1836 if (Exp == 0)
1837 Call = RTCI.createRuntimeCall(
1838 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1839 else
1840 Call = RTCI.createRuntimeCall(
1841 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1842 }
1843
1844 Call->setCannotMerge();
1845 return Call;
1846}
1847
1848Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1849 Value *ShadowValue,
1850 uint32_t TypeStoreSize) {
1851 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1852 // Addr & (Granularity - 1)
1853 Value *LastAccessedByte =
1854 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1855 // (Addr & (Granularity - 1)) + size - 1
1856 if (TypeStoreSize / 8 > 1)
1857 LastAccessedByte = IRB.CreateAdd(
1858 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1859 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1860 LastAccessedByte =
1861 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1862 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1863 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1864}
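// Editor's note (worked example): a shadow byte k in 1..7 means only the
// first k bytes of that 8-byte granule are addressable. For a 2-byte access
// with Addr % 8 == 5, LastAccessedByte = 5 + 2 - 1 = 6, and the comparison
// above reports iff 6 >= k: the access is allowed only when k == 7. A zero
// shadow byte never reaches this slow path at all.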
1865
1866Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1867 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1868 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1869 // Do not instrument unsupported addrspaces.
1870 if (isUnsupportedAMDGPUAddrspace(Addr))
1871 return nullptr;
1872 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1873 // Follow host instrumentation for global and constant addresses.
1874 if (PtrTy->getPointerAddressSpace() != 0)
1875 return InsertBefore;
1876 // Instrument generic addresses in supported address spaces.
1877 IRBuilder<> IRB(InsertBefore);
1878 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1879 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1880 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1881 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1882 Value *AddrSpaceZeroLanding =
1883 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1884 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1885 return InsertBefore;
1886}
1887
1888Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1889 Value *Cond, bool Recover) {
1890 Module &M = *IRB.GetInsertBlock()->getModule();
1891 Value *ReportCond = Cond;
1892 if (!Recover) {
1893 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1894 IRB.getInt1Ty());
1895 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1896 }
1897
1898 auto *Trm =
1899 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1900 MDBuilder(M.getContext()).createUnlikelyBranchWeights());
1901 Trm->getParent()->setName("asan.report");
1902
1903 if (Recover)
1904 return Trm;
1905
1906 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1907 IRB.SetInsertPoint(Trm);
1908 return IRB.CreateCall(
1909 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1910}
1911
1912void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1913 Instruction *InsertBefore, Value *Addr,
1914 MaybeAlign Alignment,
1915 uint32_t TypeStoreSize, bool IsWrite,
1916 Value *SizeArgument, bool UseCalls,
1917 uint32_t Exp,
1918 RuntimeCallInserter &RTCI) {
1919 if (TargetTriple.isAMDGPU()) {
1920 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1921 TypeStoreSize, IsWrite, SizeArgument);
1922 if (!InsertBefore)
1923 return;
1924 }
1925
1926 InstrumentationIRBuilder IRB(InsertBefore);
1927 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1928
1929 if (UseCalls && ClOptimizeCallbacks) {
1930 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1931 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1932 {IRB.CreatePointerCast(Addr, PtrTy),
1933 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1934 return;
1935 }
1936
1937 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1938 if (UseCalls) {
1939 if (Exp == 0)
1940 RTCI.createRuntimeCall(
1941 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1942 else
1943 RTCI.createRuntimeCall(
1944 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1945 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1946 return;
1947 }
1948
1949 Type *ShadowTy =
1950 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1951 Type *ShadowPtrTy = PointerType::get(*C, 0);
1952 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1953 const uint64_t ShadowAlign =
1954 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1955 Value *ShadowValue = IRB.CreateAlignedLoad(
1956 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1957
1958 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1959 size_t Granularity = 1ULL << Mapping.Scale;
1960 Instruction *CrashTerm = nullptr;
1961
1962 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1963
1964 if (TargetTriple.isAMDGCN()) {
1965 if (GenSlowPath) {
1966 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1967 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1968 }
1969 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1970 } else if (GenSlowPath) {
1971 // We use branch weights for the slow path check, to indicate that the slow
1972 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1973 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1974 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1975 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1976 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1977 IRB.SetInsertPoint(CheckTerm);
1978 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1979 if (Recover) {
1980 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1981 } else {
1982 BasicBlock *CrashBlock =
1983 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1984 CrashTerm = new UnreachableInst(*C, CrashBlock);
1985 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1986 ReplaceInstWithInst(CheckTerm, NewTerm);
1987 }
1988 } else {
1989 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1990 }
1991
1992 Instruction *Crash = generateCrashCode(
1993 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1994 if (OrigIns->getDebugLoc())
1995 Crash->setDebugLoc(OrigIns->getDebugLoc());
1996}
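// Editor's note (illustrative sketch of the fast path above, assuming the
// default 64-bit mapping Shadow = (Addr >> 3) + Offset):
//   %a = ptrtoint ptr %addr to i64
//   %s = add i64 (lshr i64 %a, 3), Offset
//   %v = load i8, ptr (inttoptr i64 %s)
//   br i1 (icmp ne i8 %v, 0), label %check.or.crash, label %continue
// A zero shadow byte means the whole granule is addressable; non-zero values
// go to the slow-path partial-granule check or straight to the crash block.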
1997
1998// Instrument unusual size or unusual alignment.
1999 // We cannot do it with a single check, so we do a 1-byte check for the first
2000// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
2001// to report the actual access size.
2002void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2003 Instruction *I, Instruction *InsertBefore, Value *Addr,
2004 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
2005 uint32_t Exp, RuntimeCallInserter &RTCI) {
2006 InstrumentationIRBuilder IRB(InsertBefore);
2007 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
2008 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
2009
2010 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
2011 if (UseCalls) {
2012 if (Exp == 0)
2013 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2014 {AddrLong, Size});
2015 else
2016 RTCI.createRuntimeCall(
2017 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2018 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
2019 } else {
2020 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
2021 Value *LastByte = IRB.CreateIntToPtr(
2022 IRB.CreateAdd(AddrLong, SizeMinusOne),
2023 Addr->getType());
2024 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
2025 RTCI);
2026 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
2027 Exp, RTCI);
2028 }
2029}
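// Editor's note (worked example): for a 10-byte access this emits two 1-byte
// checks, at Addr and Addr + 9, and passes the real size to the sized report
// callbacks (__asan_report_load_n / __asan_report_store_n). Interior bytes
// are not checked individually; with redzones on both sides, checking the
// first and last byte is enough to catch contiguous overflows.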
2030
2031void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
2032 // Set up the arguments to our poison/unpoison functions.
2033 IRBuilder<> IRB(&GlobalInit.front(),
2034 GlobalInit.front().getFirstInsertionPt());
2035
2036 // Add a call to poison all external globals before the given function starts.
2037 Value *ModuleNameAddr =
2038 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
2039 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2040
2041 // Add calls to unpoison all globals before each return instruction.
2042 for (auto &BB : GlobalInit)
2043 if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
2044 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
2045}
2046
2047void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2048 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2049 if (!GV)
2050 return;
2051
2052 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
2053 if (!CA)
2054 return;
2055
2056 for (Use &OP : CA->operands()) {
2057 if (isa<ConstantAggregateZero>(OP)) continue;
2058 ConstantStruct *CS = cast<ConstantStruct>(OP);
2059
2060 // Must have a function or null ptr.
2061 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2062 if (F->getName() == kAsanModuleCtorName) continue;
2063 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2064 // Don't instrument CTORs that will run before asan.module_ctor.
2065 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2066 continue;
2067 poisonOneInitializer(*F);
2068 }
2069 }
2070}
2071
2072const GlobalVariable *
2073ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2074 // In case this function should be expanded to include rules that do not just
2075 // apply when CompileKernel is true, either guard all existing rules with an
2076 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2077 // should also apply to user space.
2078 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2079
2080 const Constant *C = GA.getAliasee();
2081
2082 // When compiling the kernel, globals that are aliased by symbols prefixed
2083 // by "__" are special and cannot be padded with a redzone.
2084 if (GA.getName().starts_with("__"))
2085 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2086
2087 return nullptr;
2088}
2089
2090bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2091 Type *Ty = G->getValueType();
2092 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2093
2094 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2095 return false;
2096 if (!Ty->isSized()) return false;
2097 if (!G->hasInitializer()) return false;
2098 // Globals in address space 1 and 4 are supported for AMDGPU.
2099 if (G->getAddressSpace() &&
2100 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2101 return false;
2102 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2103 // Two problems with thread-locals:
2104 // - The address of the main thread's copy can't be computed at link-time.
2105 // - Need to poison all copies, not just the main thread's one.
2106 if (G->isThreadLocal()) return false;
2107 // For now, just ignore this Global if the alignment is large.
2108 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2109
2110 // For non-COFF targets, only instrument globals known to be defined by this
2111 // TU.
2112 // FIXME: We can instrument comdat globals on ELF if we are using the
2113 // GC-friendly metadata scheme.
2114 if (!TargetTriple.isOSBinFormatCOFF()) {
2115 if (!G->hasExactDefinition() || G->hasComdat())
2116 return false;
2117 } else {
2118 // On COFF, don't instrument non-ODR linkages.
2119 if (G->isInterposable())
2120 return false;
2121 // If the global has AvailableExternally linkage, then it is not in this
2122 // module, which means it does not need to be instrumented.
2123 if (G->hasAvailableExternallyLinkage())
2124 return false;
2125 }
2126
2127 // If a comdat is present, it must have a selection kind that implies ODR
2128 // semantics: no duplicates, any, or exact match.
2129 if (Comdat *C = G->getComdat()) {
2130 switch (C->getSelectionKind()) {
2131 case Comdat::Any:
2132 case Comdat::ExactMatch:
2133 case Comdat::NoDeduplicate:
2134 break;
2135 case Comdat::Largest:
2136 case Comdat::SameSize:
2137 return false;
2138 }
2139 }
2140
2141 if (G->hasSection()) {
2142 // The kernel mostly uses explicit sections for special global variables
2143 // that we should not instrument. E.g. the kernel may rely on their layout
2144 // without redzones, or remove them at link time ("discard.*"), etc.
2145 if (CompileKernel)
2146 return false;
2147
2148 StringRef Section = G->getSection();
2149
2150 // Globals from llvm.metadata aren't emitted; do not instrument them.
2151 if (Section == "llvm.metadata") return false;
2152 // Do not instrument globals from special LLVM sections.
2153 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2154 return false;
2155
2156 // Do not instrument function pointers to initialization and termination
2157 // routines: the dynamic linker will not properly handle redzones.
2158 if (Section.starts_with(".preinit_array") ||
2159 Section.starts_with(".init_array") ||
2160 Section.starts_with(".fini_array")) {
2161 return false;
2162 }
2163
2164 // Do not instrument user-defined sections (with names resembling
2165 // valid C identifiers)
2166 if (TargetTriple.isOSBinFormatELF()) {
2167 if (llvm::all_of(Section,
2168 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2169 return false;
2170 }
2171
2172 // On COFF, if the section name contains '$', it is highly likely that the
2173 // user is using section sorting to create an array of globals similar to
2174 // the way initialization callbacks are registered in .init_array and
2175 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2176 // to such globals is counterproductive, because the intent is that they
2177 // will form an array, and out-of-bounds accesses are expected.
2178 // See https://github.com/google/sanitizers/issues/305
2179 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2180 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2181 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2182 << *G << "\n");
2183 return false;
2184 }
2185
2186 if (TargetTriple.isOSBinFormatMachO()) {
2187 StringRef ParsedSegment, ParsedSection;
2188 unsigned TAA = 0, StubSize = 0;
2189 bool TAAParsed;
2190 cantFail(MCSectionMachO::ParseSectionSpecifier(
2191 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2192
2193 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2194 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2195 // them.
2196 if (ParsedSegment == "__OBJC" ||
2197 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2198 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2199 return false;
2200 }
2201 // See https://github.com/google/sanitizers/issues/32
2202 // Constant CFString instances are compiled in the following way:
2203 // -- the string buffer is emitted into
2204 // __TEXT,__cstring,cstring_literals
2205 // -- the constant NSConstantString structure referencing that buffer
2206 // is placed into __DATA,__cfstring
2207 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2208 // Moreover, it causes the linker to crash on OS X 10.7
2209 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2210 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2211 return false;
2212 }
2213 // The linker merges the contents of cstring_literals and removes the
2214 // trailing zeroes.
2215 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2216 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2217 return false;
2218 }
2219 }
2220 }
2221
2222 if (CompileKernel) {
2223 // Globals that are prefixed by "__" are special and cannot be padded with a
2224 // redzone.
2225 if (G->getName().starts_with("__"))
2226 return false;
2227 }
2228
2229 return true;
2230}
2231
2232// On Mach-O platforms, we emit global metadata in a separate section of the
2233// binary in order to allow the linker to properly dead strip. This is only
2234// supported on recent versions of ld64.
2235bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2236 if (!TargetTriple.isOSBinFormatMachO())
2237 return false;
2238
2239 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2240 return true;
2241 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2242 return true;
2243 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2244 return true;
2245 if (TargetTriple.isDriverKit())
2246 return true;
2247 if (TargetTriple.isXROS())
2248 return true;
2249
2250 return false;
2251}
2252
2253StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2254 switch (TargetTriple.getObjectFormat()) {
2255 case Triple::COFF: return ".ASAN$GL";
2256 case Triple::ELF: return "asan_globals";
2257 case Triple::MachO: return "__DATA,__asan_globals,regular";
2258 case Triple::Wasm:
2259 case Triple::GOFF:
2260 case Triple::SPIRV:
2261 case Triple::XCOFF:
2264 "ModuleAddressSanitizer not implemented for object file format");
2266 break;
2267 }
2268 llvm_unreachable("unsupported object format");
2269}
2270
2271void ModuleAddressSanitizer::initializeCallbacks() {
2272 IRBuilder<> IRB(*C);
2273
2274 // Declare our poisoning and unpoisoning functions.
2275 AsanPoisonGlobals =
2276 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2277 AsanUnpoisonGlobals =
2278 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2279
2280 // Declare functions that register/unregister globals.
2281 AsanRegisterGlobals = M.getOrInsertFunction(
2282 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2283 AsanUnregisterGlobals = M.getOrInsertFunction(
2284 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2285
2286 // Declare the functions that find globals in a shared object and then invoke
2287 // the (un)register function on them.
2288 AsanRegisterImageGlobals = M.getOrInsertFunction(
2289 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2290 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2291 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2292
2293 AsanRegisterElfGlobals =
2294 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2295 IntptrTy, IntptrTy, IntptrTy);
2296 AsanUnregisterElfGlobals =
2297 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2298 IntptrTy, IntptrTy, IntptrTy);
2299}
2300
2301// Put the metadata and the instrumented global in the same group. This ensures
2302// that the metadata is discarded if the instrumented global is discarded.
2303void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2304 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2305 Module &M = *G->getParent();
2306 Comdat *C = G->getComdat();
2307 if (!C) {
2308 if (!G->hasName()) {
2309 // If G is unnamed, it must be internal. Give it an artificial name
2310 // so we can put it in a comdat.
2311 assert(G->hasLocalLinkage());
2312 G->setName(genName("anon_global"));
2313 }
2314
2315 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2316 std::string Name = std::string(G->getName());
2317 Name += InternalSuffix;
2318 C = M.getOrInsertComdat(Name);
2319 } else {
2320 C = M.getOrInsertComdat(G->getName());
2321 }
2322
2323 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2324 // linkage to internal linkage so that a symbol table entry is emitted. This
2325 // is necessary in order to create the comdat group.
2326 if (TargetTriple.isOSBinFormatCOFF()) {
2327 C->setSelectionKind(Comdat::NoDeduplicate);
2328 if (G->hasPrivateLinkage())
2329 G->setLinkage(GlobalValue::InternalLinkage);
2330 }
2331 G->setComdat(C);
2332 }
2333
2334 assert(G->hasComdat());
2335 Metadata->setComdat(G->getComdat());
2336}
2337
2338// Create a separate metadata global and put it in the appropriate ASan
2339// global registration section.
2340GlobalVariable *
2341ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2342 StringRef OriginalName) {
2343 auto Linkage = TargetTriple.isOSBinFormatMachO()
2344 ? GlobalValue::InternalLinkage
2345 : GlobalValue::PrivateLinkage;
2346 GlobalVariable *Metadata = new GlobalVariable(
2347 M, Initializer->getType(), false, Linkage, Initializer,
2348 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2349 Metadata->setSection(getGlobalMetadataSection());
2350 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2351 // relocation pressure.
2352 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2353 return Metadata;
2354}
2355
2356Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2357 AsanDtorFunction = Function::createWithDefaultAttr(
2358 FunctionType::get(Type::getVoidTy(*C), false),
2359 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2360 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2361 // Ensure Dtor cannot be discarded, even if in a comdat.
2362 appendToUsed(M, {AsanDtorFunction});
2363 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2364
2365 return ReturnInst::Create(*C, AsanDtorBB);
2366}
2367
2368void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2369 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2370 ArrayRef<Constant *> MetadataInitializers) {
2371 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2372 auto &DL = M.getDataLayout();
2373
2374 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2375 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2376 Constant *Initializer = MetadataInitializers[i];
2377 GlobalVariable *G = ExtendedGlobals[i];
2378 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2379 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2380 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2381 MetadataGlobals[i] = Metadata;
2382
2383 // The MSVC linker always inserts padding when linking incrementally. We
2384 // cope with that by aligning each struct to its size, which must be a power
2385 // of two.
2386 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2387 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2388 "global metadata will not be padded appropriately");
2389 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2390
2391 SetComdatForGlobalMetadata(G, Metadata, "");
2392 }
2393
2394 // Update llvm.compiler.used, adding the new metadata globals. This is
2395 // needed so that during LTO these variables stay alive.
2396 if (!MetadataGlobals.empty())
2397 appendToCompilerUsed(M, MetadataGlobals);
2398}
2399
2400void ModuleAddressSanitizer::instrumentGlobalsELF(
2401 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2402 ArrayRef<Constant *> MetadataInitializers,
2403 const std::string &UniqueModuleId) {
2404 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2405
2406 // Putting globals in a comdat changes the semantics and can potentially
2407 // cause false-negative ODR violations at link time. If ODR indicators are
2408 // used, we keep the comdat sections, as link-time ODR violations will be
2409 // detected on the ODR indicator symbols.
2410 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2411
2412 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2413 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2414 GlobalVariable *G = ExtendedGlobals[i];
2415 GlobalVariable *Metadata =
2416 CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2417 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2418 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2419 MetadataGlobals[i] = Metadata;
2420
2421 if (UseComdatForGlobalsGC)
2422 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2423 }
2424
2425 // Update llvm.compiler.used, adding the new metadata globals. This is
2426 // needed so that during LTO these variables stay alive.
2427 if (!MetadataGlobals.empty())
2428 appendToCompilerUsed(M, MetadataGlobals);
2429
2430 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2431 // to look up the loaded image that contains it. Second, we can store in it
2432 // whether registration has already occurred, to prevent duplicate
2433 // registration.
2434 //
2435 // Common linkage ensures that there is only one global per shared library.
2436 GlobalVariable *RegisteredFlag = new GlobalVariable(
2437 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2438 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2439 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2440
2441 // Create start and stop symbols.
2442 GlobalVariable *StartELFMetadata = new GlobalVariable(
2443 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2444 "__start_" + getGlobalMetadataSection());
2445 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2446 GlobalVariable *StopELFMetadata = new GlobalVariable(
2447 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2448 "__stop_" + getGlobalMetadataSection());
2449 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2450
2451 // Create a call to register the globals with the runtime.
2452 if (ConstructorKind == AsanCtorKind::Global)
2453 IRB.CreateCall(AsanRegisterElfGlobals,
2454 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2455 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2456 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2457
2458 // We also need to unregister globals at the end, e.g., when a shared library
2459 // gets closed.
2460 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2461 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2462 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2463 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2464 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2465 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2466 }
2467}
2468
2469void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2470 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2471 ArrayRef<Constant *> MetadataInitializers) {
2472 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2473
2474 // On recent Mach-O platforms, use a structure which binds the liveness of
2475 // the global variable to the metadata struct. Keep a list of the "liveness"
2476 // GVs created, to be added to llvm.compiler.used later.
2477 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2478 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2479
2480 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2481 Constant *Initializer = MetadataInitializers[i];
2482 GlobalVariable *G = ExtendedGlobals[i];
2483 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2484
2485 // On recent Mach-O platforms, we emit the global metadata in a way that
2486 // allows the linker to properly strip dead globals.
2487 auto LivenessBinder =
2488 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2489 Initializer->getAggregateElement(1u));
2490 GlobalVariable *Liveness = new GlobalVariable(
2491 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2492 Twine("__asan_binder_") + G->getName());
2493 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2494 LivenessGlobals[i] = Liveness;
2495 }
2496
2497 // Update llvm.compiler.used, adding the new liveness globals. This is
2498 // needed so that during LTO these variables stay alive. The alternative
2499 // would be to have the linker handle the LTO symbols, but libLTO's
2500 // current API does not expose access to the section for each symbol.
2501 if (!LivenessGlobals.empty())
2502 appendToCompilerUsed(M, LivenessGlobals);
2503
2504 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2505 // to look up the loaded image that contains it. Second, we can store in it
2506 // whether registration has already occurred, to prevent duplicate
2507 // registration.
2508 //
2509 // Common linkage ensures that there is only one global per shared library.
2510 GlobalVariable *RegisteredFlag = new GlobalVariable(
2511 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2512 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2513 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2514
2515 if (ConstructorKind == AsanCtorKind::Global)
2516 IRB.CreateCall(AsanRegisterImageGlobals,
2517 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2518
2519 // We also need to unregister globals at the end, e.g., when a shared library
2520 // gets closed.
2521 if (DestructorKind != AsanDtorKind::None) {
2522 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2523 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2524 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2525 }
2526}
2527
2528void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2529 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2530 ArrayRef<Constant *> MetadataInitializers) {
2531 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2532 unsigned N = ExtendedGlobals.size();
2533 assert(N > 0);
2534
2535 // On platforms that don't have a custom metadata section, we emit an array
2536 // of global metadata structures.
2537 ArrayType *ArrayOfGlobalStructTy =
2538 ArrayType::get(MetadataInitializers[0]->getType(), N);
2539 auto AllGlobals = new GlobalVariable(
2540 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2541 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2542 if (Mapping.Scale > 3)
2543 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2544
2545 if (ConstructorKind == AsanCtorKind::Global)
2546 IRB.CreateCall(AsanRegisterGlobals,
2547 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2548 ConstantInt::get(IntptrTy, N)});
2549
2550 // We also need to unregister globals at the end, e.g., when a shared library
2551 // gets closed.
2552 if (DestructorKind != AsanDtorKind::None) {
2553 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2554 IrbDtor.CreateCall(AsanUnregisterGlobals,
2555 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2556 ConstantInt::get(IntptrTy, N)});
2557 }
2558}
2559
2560// This function replaces all global variables with new variables that have
2561// trailing redzones. It also creates a function that poisons
2562// redzones and inserts this function into llvm.global_ctors.
2563// Sets *CtorComdat to true if the global registration code emitted into the
2564// asan constructor is comdat-compatible.
2565void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
2566 bool *CtorComdat) {
2567 // Build set of globals that are aliased by some GA, where
2568 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2569 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2570 if (CompileKernel) {
2571 for (auto &GA : M.aliases()) {
2572 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2573 AliasedGlobalExclusions.insert(GV);
2574 }
2575 }
2576
2577 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2578 for (auto &G : M.globals()) {
2579 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2580 GlobalsToChange.push_back(&G);
2581 }
2582
2583 size_t n = GlobalsToChange.size();
2584 auto &DL = M.getDataLayout();
2585
2586 // A global is described by a structure
2587 // size_t beg;
2588 // size_t size;
2589 // size_t size_with_redzone;
2590 // const char *name;
2591 // const char *module_name;
2592 // size_t has_dynamic_init;
2593 // size_t padding_for_windows_msvc_incremental_link;
2594 // size_t odr_indicator;
2595 // We initialize an array of such structures and pass it to a run-time call.
2596 StructType *GlobalStructTy =
2597 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2598 IntptrTy, IntptrTy, IntptrTy);
2599 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2600 SmallVector<Constant *, 16> Initializers(n);
2601
2602 for (size_t i = 0; i < n; i++) {
2603 GlobalVariable *G = GlobalsToChange[i];
2604
2605 GlobalValue::SanitizerMetadata MD;
2606 if (G->hasSanitizerMetadata())
2607 MD = G->getSanitizerMetadata();
2608
2609 // The runtime library tries demangling symbol names in the descriptor but
2610 // functionality like __cxa_demangle may be unavailable (e.g.
2611 // -static-libstdc++). So we demangle the symbol names here.
2612 std::string NameForGlobal = G->getName().str();
2613 GlobalVariable *Name =
2614 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2615 /*AllowMerging*/ true, genName("global"));
2616
2617 Type *Ty = G->getValueType();
2618 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2619 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2620 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2621
2622 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2623 Constant *NewInitializer = ConstantStruct::get(
2624 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2625
2626 // Create a new global variable with enough space for a redzone.
2627 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2628 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2629 Linkage = GlobalValue::InternalLinkage;
2630 GlobalVariable *NewGlobal = new GlobalVariable(
2631 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2632 G->getThreadLocalMode(), G->getAddressSpace());
2633 NewGlobal->copyAttributesFrom(G);
2634 NewGlobal->setComdat(G->getComdat());
2635 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2636 // Don't fold globals with redzones. The ODR violation detector and redzone
2637 // poisoning implicitly create a dependence on the global's address, so it
2638 // is no longer valid for it to be marked unnamed_addr.
2639 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2640
2641 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2642 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2643 G->isConstant()) {
2644 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2645 if (Seq && Seq->isCString())
2646 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2647 }
2648
2649 // Transfer the debug info and type metadata. The payload starts at offset
2650 // zero so we can copy the metadata over as is.
2651 NewGlobal->copyMetadata(G, 0);
2652
2653 Value *Indices2[2];
2654 Indices2[0] = IRB.getInt32(0);
2655 Indices2[1] = IRB.getInt32(0);
2656
2657 G->replaceAllUsesWith(
2658 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2659 NewGlobal->takeName(G);
2660 G->eraseFromParent();
2661 NewGlobals[i] = NewGlobal;
2662
2663 Constant *ODRIndicator = Constant::getNullValue(IntptrTy);
2664 GlobalValue *InstrumentedGlobal = NewGlobal;
2665
2666 bool CanUsePrivateAliases =
2667 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2668 TargetTriple.isOSBinFormatWasm();
2669 if (CanUsePrivateAliases && UsePrivateAlias) {
2670 // Create a local alias for NewGlobal to avoid crashes on ODR between
2671 // instrumented and non-instrumented libraries.
2672 InstrumentedGlobal =
2673 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2674 }
2675
2676 // ODR should not happen for local linkage.
2677 if (NewGlobal->hasLocalLinkage()) {
2678 ODRIndicator = ConstantInt::get(IntptrTy, -1);
2679 } else if (UseOdrIndicator) {
2680 // With local aliases, we need to provide another externally visible
2681 // symbol __odr_asan_XXX to detect ODR violation.
2682 auto *ODRIndicatorSym =
2683 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2684 Constant::getNullValue(IRB.getInt8Ty()),
2685 kODRGenPrefix + NameForGlobal, nullptr,
2686 NewGlobal->getThreadLocalMode());
2687
2688 // Set meaningful attributes for indicator symbol.
2689 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2690 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2691 ODRIndicatorSym->setAlignment(Align(1));
2692 ODRIndicator = ConstantExpr::getPtrToInt(ODRIndicatorSym, IntptrTy);
2693 }
2694
2695 Constant *Initializer = ConstantStruct::get(
2696 GlobalStructTy,
2697 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2698 ConstantInt::get(IntptrTy, SizeInBytes),
2699 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2700 ConstantExpr::getPointerCast(Name, IntptrTy),
2701 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
2702 ConstantInt::get(IntptrTy, MD.IsDynInit),
2703 Constant::getNullValue(IntptrTy), ODRIndicator);
2704
2705 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2706
2707 Initializers[i] = Initializer;
2708 }
2709
2710 // Add instrumented globals to the llvm.compiler.used list to prevent LTO
2711 // from ConstantMerge'ing them.
2712 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2713 for (size_t i = 0; i < n; i++) {
2714 GlobalVariable *G = NewGlobals[i];
2715 if (G->getName().empty()) continue;
2716 GlobalsToAddToUsedList.push_back(G);
2717 }
2718 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2719
2720 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2721 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2722 // linkage unit will only have one module constructor, and (b) the register
2723 // function will be called. The module destructor is not created when n ==
2724 // 0.
2725 *CtorComdat = true;
2726 instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
2727 } else if (n == 0) {
2728 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2729 // all compile units will have identical module constructor/destructor.
2730 *CtorComdat = TargetTriple.isOSBinFormatELF();
2731 } else {
2732 *CtorComdat = false;
2733 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2734 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2735 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2736 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2737 } else {
2738 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2739 }
2740 }
2741
2742 // Create calls for poisoning before initializers run and unpoisoning after.
2743 if (ClInitializers)
2744 createInitializerPoisonCalls();
2745
2746 LLVM_DEBUG(dbgs() << M);
2747}
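// Editor's note (worked example, not in the source): for `int g = 42;` with a
// 32-byte minimum redzone, the rewrite above yields roughly
//   @g = global { i32, [28 x i8] } { i32 42, [28 x i8] zeroinitializer },
//   align 32
// with all old uses redirected to the first field, plus one descriptor
// (beg, size = 4, size_with_redzone = 32, name, module_name, has_dynamic_init,
// padding, odr_indicator) appended to Initializers for runtime registration.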
2748
2749uint64_t
2750ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2751 constexpr uint64_t kMaxRZ = 1 << 18;
2752 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2753
2754 uint64_t RZ = 0;
2755 if (SizeInBytes <= MinRZ / 2) {
2756 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2757 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2758 // half of MinRZ.
2759 RZ = MinRZ - SizeInBytes;
2760 } else {
2761 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2762 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2763
2764 // Round up to multiple of MinRZ.
2765 if (SizeInBytes % MinRZ)
2766 RZ += MinRZ - (SizeInBytes % MinRZ);
2767 }
2768
2769 assert((RZ + SizeInBytes) % MinRZ == 0);
2770
2771 return RZ;
2772}
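// Editor's note (worked examples): with MinRZ = 32, a 4-byte global gets
// RZ = 32 - 4 = 28, so object plus redzone fills exactly one MinRZ unit. A
// 100-byte global takes the second branch: (100 / 32 / 4) * 32 = 0 clamps up
// to 32, and the round-up adds 32 - (100 % 32) = 28, giving RZ = 60 with
// (100 + 60) % 32 == 0, as the assertion requires.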
2773
2774int ModuleAddressSanitizer::GetAsanVersion() const {
2775 int LongSize = M.getDataLayout().getPointerSizeInBits();
2776 bool isAndroid = M.getTargetTriple().isAndroid();
2777 int Version = 8;
2778 // 32-bit Android is one version ahead because of the switch to dynamic
2779 // shadow.
2780 Version += (LongSize == 32 && isAndroid);
2781 return Version;
2782}
2783
2784GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2785 if (!ModuleName) {
2786 // We shouldn't merge identical module names, as this string serves as a
2787 // unique module ID in the runtime.
2788 ModuleName =
2789 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2790 /*AllowMerging*/ false, genName("module"));
2791 }
2792 return ModuleName;
2793}
2794
2795bool ModuleAddressSanitizer::instrumentModule() {
2796 initializeCallbacks();
2797
2798 for (Function &F : M)
2799 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);
2800
2801 // Create a module constructor. A destructor is created lazily because not
2802 // all platforms and not all modules need it.
2803 if (ConstructorKind == AsanCtorKind::Global) {
2804 if (CompileKernel) {
2805 // The kernel always builds with its own runtime, and therefore does not
2806 // need the init and version check calls.
2807 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2808 } else {
2809 std::string AsanVersion = std::to_string(GetAsanVersion());
2810 std::string VersionCheckName =
2811 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2812 std::tie(AsanCtorFunction, std::ignore) =
2813 createSanitizerCtorAndInitFunctions(
2814 M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
2815 /*InitArgs=*/{}, VersionCheckName);
2816 }
2817 }
2818
2819 bool CtorComdat = true;
2820 if (ClGlobals) {
2821 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2822 if (AsanCtorFunction) {
2823 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2824 instrumentGlobals(IRB, &CtorComdat);
2825 } else {
2826 IRBuilder<> IRB(*C);
2827 instrumentGlobals(IRB, &CtorComdat);
2828 }
2829 }
2830
2831 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2832
2833 // Put the constructor and destructor in comdat if both
2834 // (1) global instrumentation is not TU-specific
2835 // (2) target is ELF.
2836 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2837 if (AsanCtorFunction) {
2838 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2839 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2840 }
2841 if (AsanDtorFunction) {
2842 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2843 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2844 }
2845 } else {
2846 if (AsanCtorFunction)
2847 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2848 if (AsanDtorFunction)
2849 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2850 }
2851
2852 return true;
2853}
2854
2855void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
2856 IRBuilder<> IRB(*C);
2857 // Create __asan_report* callbacks.
2858 // IsWrite, TypeSize and Exp are encoded in the function name.
2859 for (int Exp = 0; Exp < 2; Exp++) {
2860 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2861 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2862 const std::string ExpStr = Exp ? "exp_" : "";
2863 const std::string EndingStr = Recover ? "_noabort" : "";
2864
2865 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2866 SmallVector<Type *, 2> Args1{1, IntptrTy};
2867 AttributeList AL2;
2868 AttributeList AL1;
2869 if (Exp) {
2870 Type *ExpType = Type::getInt32Ty(*C);
2871 Args2.push_back(ExpType);
2872 Args1.push_back(ExpType);
2873 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2874 AL2 = AL2.addParamAttribute(*C, 2, AK);
2875 AL1 = AL1.addParamAttribute(*C, 1, AK);
2876 }
2877 }
2878 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2879 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2880 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2881
2882 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2883 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2884 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2885
2886 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2887 AccessSizeIndex++) {
2888 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2889 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2890 M.getOrInsertFunction(
2891 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2892 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2893
2894 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2895 M.getOrInsertFunction(
2896 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2897 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2898 }
2899 }
2900 }
2901
2902 const std::string MemIntrinCallbackPrefix =
2903 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2904 ? std::string("")
2905 : ClMemoryAccessCallbackPrefix;
2906 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2907 PtrTy, PtrTy, PtrTy, IntptrTy);
2908 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2909 PtrTy, PtrTy, IntptrTy);
2910 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2911 TLI->getAttrList(C, {1}, /*Signed=*/false),
2912 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2913
2914 AsanHandleNoReturnFunc =
2915 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2916
2917 AsanPtrCmpFunction =
2918 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2919 AsanPtrSubFunction =
2920 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2921 if (Mapping.InGlobal)
2922 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2923 ArrayType::get(IRB.getInt8Ty(), 0));
2924
2925 AMDGPUAddressShared =
2926 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2927 AMDGPUAddressPrivate =
2928 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2929}
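// Editor's note (illustrative): with the default "__asan_" prefix, the loops
// above materialize one callback per combination, e.g. __asan_load4 and
// __asan_report_load4 for plain 4-byte reads, __asan_report_exp_store8 for
// experiments, __asan_loadN for unusual sizes, plus _noabort variants when
// the pass runs in recover mode, so instrumented code calls a fixed symbol
// per access kind instead of passing IsWrite/size/Exp at run time.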
2930
2931bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2932 // For each NSObject descendant having a +load method, this method is invoked
2933 // by the ObjC runtime before any of the static constructors is called.
2934 // Therefore we need to instrument such methods with a call to __asan_init
2935 // at the beginning in order to initialize our runtime before any access to
2936 // the shadow memory.
2937 // We cannot just ignore these methods, because they may call other
2938 // instrumented functions.
2939 if (F.getName().contains(" load]")) {
2940 FunctionCallee AsanInitFunction =
2941 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2942 IRBuilder<> IRB(&F.front(), F.front().begin());
2943 IRB.CreateCall(AsanInitFunction, {});
2944 return true;
2945 }
2946 return false;
2947}
2948
2949bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2950 // Generate code only when dynamic addressing is needed.
2951 if (Mapping.Offset != kDynamicShadowSentinel)
2952 return false;
2953
2954 IRBuilder<> IRB(&F.front().front());
2955 if (Mapping.InGlobal) {
2957 // An empty inline asm with input reg == output reg.
2958 // An opaque pointer-to-int cast, basically.
2959 InlineAsm *Asm = InlineAsm::get(
2960 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2961 StringRef(""), StringRef("=r,0"),
2962 /*hasSideEffects=*/false);
2963 LocalDynamicShadow =
2964 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2965 } else {
2966 LocalDynamicShadow =
2967 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2968 }
2969 } else {
2970 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2971 kAsanShadowMemoryDynamicAddress, IntptrTy);
2972 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2973 }
2974 return true;
2975}
2976
2977void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2978 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2979 // to it as uninteresting. This assumes we haven't started processing allocas
2980 // yet. This check is done up front because iterating the use list in
2981 // isInterestingAlloca would be algorithmically slower.
2982 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2983
2984 // Try to get the declaration of llvm.localescape. If it's not in the module,
2985 // we can exit early.
2986 if (!F.getParent()->getFunction("llvm.localescape")) return;
2987
2988 // Look for a call to llvm.localescape in the entry block. It can't be in
2989 // any other block.
2990 for (Instruction &I : F.getEntryBlock()) {
2991 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2992 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2993 // We found a call. Mark all the allocas passed in as uninteresting.
2994 for (Value *Arg : II->args()) {
2995 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2996 assert(AI && AI->isStaticAlloca() &&
2997 "non-static alloca arg to localescape");
2998 ProcessedAllocas[AI] = false;
2999 }
3000 break;
3001 }
3002 }
3003}
3004
3005bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
3006 bool ShouldInstrument =
3007 ClDebugMin < 0 || ClDebugMax < 0 ||
3008 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
3009 Instrumented++;
3010 return !ShouldInstrument;
3011}
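// Illustrative note: ClDebugMin/ClDebugMax implement instrumentation-site
// bisection for debugging. E.g. -asan-debug-min=0 -asan-debug-max=41 keeps
// checks only for the first 42 candidate sites counted per function; with
// the defaults (-1) nothing is suppressed.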
3012
3013bool AddressSanitizer::instrumentFunction(Function &F,
3014 const TargetLibraryInfo *TLI,
3015 const TargetTransformInfo *TTI) {
3016 bool FunctionModified = false;
3017
3018 // Do not apply any instrumentation for naked functions.
3019 if (F.hasFnAttribute(Attribute::Naked))
3020 return FunctionModified;
3021
3022 // If needed, insert __asan_init before checking for SanitizeAddress attr.
3023 // This function needs to be called even if the function body is not
3024 // instrumented.
3025 if (maybeInsertAsanInitAtFunctionEntry(F))
3026 FunctionModified = true;
3027
3028 // Leave if the function doesn't need instrumentation.
3029 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
3030
3031 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3032 return FunctionModified;
3033
3034 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
3035
3036 initializeCallbacks(TLI);
3037
3038 FunctionStateRAII CleanupObj(this);
3039
3040 RuntimeCallInserter RTCI(F);
3041
3042 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
3043
3044 // We can't instrument allocas used with llvm.localescape. Only static allocas
3045 // can be passed to that intrinsic.
3046 markEscapedLocalAllocas(F);
3047
3048 // We want to instrument every address only once per basic block (unless there
3049 // are calls between uses).
3050 SmallPtrSet<Value *, 16> TempsToInstrument;
3051 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
3052 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
3053 SmallVector<Instruction *, 8> NoReturnCalls;
3054 SmallVector<BasicBlock *, 16> AllBlocks;
3055 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
3056
3057 // Fill the set of memory operations to instrument.
3058 for (auto &BB : F) {
3059 AllBlocks.push_back(&BB);
3060 TempsToInstrument.clear();
3061 int NumInsnsPerBB = 0;
3062 for (auto &Inst : BB) {
3063 if (LooksLikeCodeInBug11395(&Inst)) return false;
3064 // Skip instructions inserted by another instrumentation.
3065 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3066 continue;
3067 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3068 getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);
3069
3070 if (!InterestingOperands.empty()) {
3071 for (auto &Operand : InterestingOperands) {
3072 if (ClOpt && ClOptSameTemp) {
3073 Value *Ptr = Operand.getPtr();
3074 // If we have a mask, skip instrumentation if we've already
3075 // instrumented the full object. But don't add to TempsToInstrument
3076 // because we might get another load/store with a different mask.
3077 if (Operand.MaybeMask) {
3078 if (TempsToInstrument.count(Ptr))
3079 continue; // We've seen this (whole) temp in the current BB.
3080 } else {
3081 if (!TempsToInstrument.insert(Ptr).second)
3082 continue; // We've seen this temp in the current BB.
3083 }
3084 }
3085 OperandsToInstrument.push_back(Operand);
3086 NumInsnsPerBB++;
3087 }
3088 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3089 isInterestingPointerComparison(&Inst)) ||
3090 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3091 isInterestingPointerSubtraction(&Inst))) {
3092 PointerComparisonsOrSubtracts.push_back(&Inst);
3093 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3094 // ok, take it.
3095 IntrinToInstrument.push_back(MI);
3096 NumInsnsPerBB++;
3097 } else {
3098 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3099 // A call inside BB.
3100 TempsToInstrument.clear();
3101 if (CB->doesNotReturn())
3102 NoReturnCalls.push_back(CB);
3103 }
3104 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3105 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3106 }
3107 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3108 }
3109 }
3110
3111 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3112 OperandsToInstrument.size() + IntrinToInstrument.size() >
3113 (unsigned)InstrumentationWithCallsThreshold);
3114 const DataLayout &DL = F.getDataLayout();
3115 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());
3116
3117 // Instrument.
3118 int NumInstrumented = 0;
3119 for (auto &Operand : OperandsToInstrument) {
3120 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3121 instrumentMop(ObjSizeVis, Operand, UseCalls,
3122 F.getDataLayout(), RTCI);
3123 FunctionModified = true;
3124 }
3125 for (auto *Inst : IntrinToInstrument) {
3126 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3127 instrumentMemIntrinsic(Inst, RTCI);
3128 FunctionModified = true;
3129 }
3130
3131 FunctionStackPoisoner FSP(F, *this, RTCI);
3132 bool ChangedStack = FSP.runOnFunction();
3133
3134 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3135 // See e.g. https://github.com/google/sanitizers/issues/37
3136 for (auto *CI : NoReturnCalls) {
3137 IRBuilder<> IRB(CI);
3138 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3139 }
3140
3141 for (auto *Inst : PointerComparisonsOrSubtracts) {
3142 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3143 FunctionModified = true;
3144 }
3145
3146 if (ChangedStack || !NoReturnCalls.empty())
3147 FunctionModified = true;
3148
3149 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3150 << F << "\n");
3151
3152 return FunctionModified;
3153}
3154
3155// Workaround for bug 11395: we don't want to instrument stack in functions
3156// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
3157 // FIXME: remove once bug 11395 is fixed.
3158bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3159 if (LongSize != 32) return false;
3160 CallInst *CI = dyn_cast<CallInst>(I);
3161 if (!CI || !CI->isInlineAsm()) return false;
3162 if (CI->arg_size() <= 5)
3163 return false;
3164 // We have inline assembly with quite a few arguments.
3165 return true;
3166}
3167
3168void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3169 IRBuilder<> IRB(*C);
3170 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3171 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3172 const char *MallocNameTemplate =
3173 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3174 ? kAsanStackMallocAlwaysNameTemplate
3175 : kAsanStackMallocNameTemplate;
3176 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3177 std::string Suffix = itostr(Index);
3178 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3179 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3180 AsanStackFreeFunc[Index] =
3181 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3182 IRB.getVoidTy(), IntptrTy, IntptrTy);
3183 }
3184 }
3185 if (ASan.UseAfterScope) {
3186 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3187 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3188 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3189 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3190 }
3191
3192 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3193 0xf3, 0xf5, 0xf8}) {
3194 std::ostringstream Name;
3195 Name << kAsanSetShadowPrefix;
3196 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3197 AsanSetShadowFunc[Val] =
3198 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3199 }
3200
3201 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3202 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3203 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3204 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3205}
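// Illustrative note: the loops above materialize the runtime's naming
// scheme, e.g. __asan_stack_malloc_3 / __asan_stack_free_3 for size class 3
// (or __asan_stack_malloc_always_3 in Always mode), and
// __asan_set_shadow_f1(addr, size) for bulk writes of shadow byte 0xf1.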
3206
3207void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3208 ArrayRef<uint8_t> ShadowBytes,
3209 size_t Begin, size_t End,
3210 IRBuilder<> &IRB,
3211 Value *ShadowBase) {
3212 if (Begin >= End)
3213 return;
3214
3215 const size_t LargestStoreSizeInBytes =
3216 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3217
3218 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3219
3220 // Poison the given range in shadow using the largest possible store size,
3221 // skipping leading and trailing zeros in ShadowMask. Zeros never change, so
3222 // they need neither poisoning nor unpoisoning. Still, we don't mind if some
3223 // of them end up in the middle of a store.
3224 for (size_t i = Begin; i < End;) {
3225 if (!ShadowMask[i]) {
3226 assert(!ShadowBytes[i]);
3227 ++i;
3228 continue;
3229 }
3230
3231 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3232 // Fit store size into the range.
3233 while (StoreSizeInBytes > End - i)
3234 StoreSizeInBytes /= 2;
3235
3236 // Minimize store size by trimming trailing zeros.
3237 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3238 while (j <= StoreSizeInBytes / 2)
3239 StoreSizeInBytes /= 2;
3240 }
3241
3242 uint64_t Val = 0;
3243 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3244 if (IsLittleEndian)
3245 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3246 else
3247 Val = (Val << 8) | ShadowBytes[i + j];
3248 }
3249
3250 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3251 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3252 IRB.CreateAlignedStore(
3253 Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
3254 Align(1));
3255
3256 i += StoreSizeInBytes;
3257 }
3258}
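// Worked example (illustrative): on a little-endian 64-bit target, a shadow
// run { f1 f1 f1 f1 } at offset i is emitted as one unaligned i32 store of
// 0xf1f1f1f1 to (ShadowBase + i); a run of 8 poisoned bytes becomes a single
// i64 store. Trailing zero bytes in the mask shrink the store size first,
// while zeros in the middle of a run may simply be rewritten with their
// (zero) shadow value.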
3259
3260void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3261 ArrayRef<uint8_t> ShadowBytes,
3262 IRBuilder<> &IRB, Value *ShadowBase) {
3263 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3264}
3265
3266void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3267 ArrayRef<uint8_t> ShadowBytes,
3268 size_t Begin, size_t End,
3269 IRBuilder<> &IRB, Value *ShadowBase) {
3270 assert(ShadowMask.size() == ShadowBytes.size());
3271 size_t Done = Begin;
3272 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3273 if (!ShadowMask[i]) {
3274 assert(!ShadowBytes[i]);
3275 continue;
3276 }
3277 uint8_t Val = ShadowBytes[i];
3278 if (!AsanSetShadowFunc[Val])
3279 continue;
3280
3281 // Skip same values.
3282 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3283 }
3284
3285 if (j - i >= ASan.MaxInlinePoisoningSize) {
3286 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3287 RTCI.createRuntimeCall(
3288 IRB, AsanSetShadowFunc[Val],
3289 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3290 ConstantInt::get(IntptrTy, j - i)});
3291 Done = j;
3292 }
3293 }
3294
3295 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3296}
3297
3298 // The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
3299 // each power of 2 from kMinStackMallocSize up to kMaxAsanStackMallocSizeClass.
3300static int StackMallocSizeClass(uint64_t LocalStackSize) {
3301 assert(LocalStackSize <= kMaxStackMallocSize);
3302 uint64_t MaxSize = kMinStackMallocSize;
3303 for (int i = 0;; i++, MaxSize *= 2)
3304 if (LocalStackSize <= MaxSize) return i;
3305 llvm_unreachable("impossible LocalStackSize");
3306}
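// Worked example (illustrative): with kMinStackMallocSize = 64, a frame of
// 96 bytes maps to class 1 (64 < 96 <= 128), and kMaxStackMallocSize (64K)
// maps to class 10, matching kMaxAsanStackMallocSizeClass.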
3307
3308void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3309 Instruction *CopyInsertPoint = &F.front().front();
3310 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3311 // Insert after the dynamic shadow location is determined
3312 CopyInsertPoint = CopyInsertPoint->getNextNode();
3313 assert(CopyInsertPoint);
3314 }
3315 IRBuilder<> IRB(CopyInsertPoint);
3316 const DataLayout &DL = F.getDataLayout();
3317 for (Argument &Arg : F.args()) {
3318 if (Arg.hasByValAttr()) {
3319 Type *Ty = Arg.getParamByValType();
3320 const Align Alignment =
3321 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3322
3323 AllocaInst *AI = IRB.CreateAlloca(
3324 Ty, nullptr,
3325 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3326 ".byval");
3327 AI->setAlignment(Alignment);
3328 Arg.replaceAllUsesWith(AI);
3329
3330 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3331 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3332 }
3333 }
3334}
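// Illustrative note: byval arguments live in caller-owned stack memory that
// this pass cannot redzone, so the loop above gives each one a fresh local
// alloca (filled in with a memcpy) that the normal stack poisoning machinery
// can then guard like any other interesting alloca.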
3335
3336PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3337 Value *ValueIfTrue,
3338 Instruction *ThenTerm,
3339 Value *ValueIfFalse) {
3340 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3341 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3342 PHI->addIncoming(ValueIfFalse, CondBlock);
3343 BasicBlock *ThenBlock = ThenTerm->getParent();
3344 PHI->addIncoming(ValueIfTrue, ThenBlock);
3345 return PHI;
3346}
3347
3348Value *FunctionStackPoisoner::createAllocaForLayout(
3349 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3350 AllocaInst *Alloca;
3351 if (Dynamic) {
3352 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3353 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3354 "MyAlloca");
3355 } else {
3356 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3357 nullptr, "MyAlloca");
3358 assert(Alloca->isStaticAlloca());
3359 }
3360 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3361 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3362 Alloca->setAlignment(Align(FrameAlignment));
3363 return IRB.CreatePointerCast(Alloca, IntptrTy);
3364}
3365
3366void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3367 BasicBlock &FirstBB = *F.begin();
3368 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3369 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3370 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3371 DynamicAllocaLayout->setAlignment(Align(32));
3372}
3373
3374void FunctionStackPoisoner::processDynamicAllocas() {
3375 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3376 assert(DynamicAllocaPoisonCallVec.empty());
3377 return;
3378 }
3379
3380 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3381 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3382 assert(APC.InsBefore);
3383 assert(APC.AI);
3384 assert(ASan.isInterestingAlloca(*APC.AI));
3385 assert(!APC.AI->isStaticAlloca());
3386
3387 IRBuilder<> IRB(APC.InsBefore);
3388 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3389 // Dynamic allocas will be unpoisoned unconditionally below in
3390 // unpoisonDynamicAllocas.
3391 // Flag that we need to unpoison static allocas.
3392 }
3393
3394 // Handle dynamic allocas.
3395 createDynamicAllocasInitStorage();
3396 for (auto &AI : DynamicAllocaVec)
3397 handleDynamicAllocaCall(AI);
3398 unpoisonDynamicAllocas();
3399}
3400
3401/// Collect instructions in the entry block after \p InsBefore which initialize
3402/// permanent storage for a function argument. These instructions must remain in
3403/// the entry block so that uninitialized values do not appear in backtraces. An
3404/// added benefit is that this conserves spill slots. This does not move stores
3405/// before instrumented / "interesting" allocas.
3406 static void findStoresToUninstrumentedArgAllocas(
3407 AddressSanitizer &ASan, Instruction &InsBefore,
3408 SmallVectorImpl<Instruction *> &InitInsts) {
3409 Instruction *Start = InsBefore.getNextNode();
3410 for (Instruction *It = Start; It; It = It->getNextNode()) {
3411 // Argument initialization looks like:
3412 // 1) store <Argument>, <Alloca> OR
3413 // 2) <CastArgument> = cast <Argument> to ...
3414 // store <CastArgument> to <Alloca>
3415 // Do not consider any other kind of instruction.
3416 //
3417 // Note: This covers all known cases, but may not be exhaustive. An
3418 // alternative to pattern-matching stores is to DFS over all Argument uses:
3419 // this might be more general, but is probably much more complicated.
3420 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3421 continue;
3422 if (auto *Store = dyn_cast<StoreInst>(It)) {
3423 // The store destination must be an alloca that isn't interesting for
3424 // ASan to instrument. These are moved up before InsBefore, and they're
3425 // not interesting because allocas for arguments can be mem2reg'd.
3426 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3427 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3428 continue;
3429
3430 Value *Val = Store->getValueOperand();
3431 bool IsDirectArgInit = isa<Argument>(Val);
3432 bool IsArgInitViaCast =
3433 isa<CastInst>(Val) &&
3434 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3435 // Check that the cast appears directly before the store. Otherwise
3436 // moving the cast before InsBefore may break the IR.
3437 Val == It->getPrevNode();
3438 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3439 if (!IsArgInit)
3440 continue;
3441
3442 if (IsArgInitViaCast)
3443 InitInsts.push_back(cast<Instruction>(Val));
3444 InitInsts.push_back(Store);
3445 continue;
3446 }
3447
3448 // Do not reorder past unknown instructions: argument initialization should
3449 // only involve casts and stores.
3450 return;
3451 }
3452}
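// Illustrative example of the pattern matched above:
//   %x.addr = alloca i64            ; uninteresting (promotable) alloca
//   %conv = sext i32 %x to i64      ; cast directly preceding the store
//   store i64 %conv, ptr %x.addr
// Both %conv and the store are collected so they can be moved together and
// stay in the entry block.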
3453
3454 static StringRef getAllocaName(AllocaInst *AI) {
3455 // The alloca could have been renamed for uniqueness. Its true name will have been
3456 // recorded as an annotation.
3457 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3458 MDTuple *AllocaAnnotations =
3459 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3460 for (auto &Annotation : AllocaAnnotations->operands()) {
3461 if (!isa<MDTuple>(Annotation))
3462 continue;
3463 auto AnnotationTuple = cast<MDTuple>(Annotation);
3464 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3465 Index++) {
3466 // All annotations are strings
3467 auto MetadataString =
3468 cast<MDString>(AnnotationTuple->getOperand(Index));
3469 if (MetadataString->getString() == "alloca_name_altered")
3470 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3471 ->getString();
3472 }
3473 }
3474 }
3475 return AI->getName();
3476}
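// Illustrative note: the metadata walked above looks roughly like
//   %x = alloca i32, !annotation !0
//   !0 = !{!1}
//   !1 = !{!"alloca_name_altered", !"x"}
// where the string operand following the marker carries the original name.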
3477
3478void FunctionStackPoisoner::processStaticAllocas() {
3479 if (AllocaVec.empty()) {
3480 assert(StaticAllocaPoisonCallVec.empty());
3481 return;
3482 }
3483
3484 int StackMallocIdx = -1;
3485 DebugLoc EntryDebugLocation;
3486 if (auto SP = F.getSubprogram())
3487 EntryDebugLocation =
3488 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3489
3490 Instruction *InsBefore = AllocaVec[0];
3491 IRBuilder<> IRB(InsBefore);
3492
3493 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3494 // debug info is broken, because only entry-block allocas are treated as
3495 // regular stack slots.
3496 auto InsBeforeB = InsBefore->getParent();
3497 assert(InsBeforeB == &F.getEntryBlock());
3498 for (auto *AI : StaticAllocasToMoveUp)
3499 if (AI->getParent() == InsBeforeB)
3500 AI->moveBefore(InsBefore->getIterator());
3501
3502 // Move stores of arguments into entry-block allocas as well. This prevents
3503 // extra stack slots from being generated (to house the argument values until
3504 // they can be stored into the allocas). This also prevents uninitialized
3505 // values from being shown in backtraces.
3506 SmallVector<Instruction *, 8> ArgInitInsts;
3507 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3508 for (Instruction *ArgInitInst : ArgInitInsts)
3509 ArgInitInst->moveBefore(InsBefore->getIterator());
3510
3511 // If we have a call to llvm.localescape, keep it in the entry block.
3512 if (LocalEscapeCall)
3513 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3514
3515 SmallVector<ASanStackVariableDescription, 16> SVD;
3516 SVD.reserve(AllocaVec.size());
3517 for (AllocaInst *AI : AllocaVec) {
3518 StringRef Name = getAllocaName(AI);
3519 ASanStackVariableDescription D = {Name.data(),
3520 ASan.getAllocaSizeInBytes(*AI),
3521 0,
3522 AI->getAlign().value(),
3523 AI,
3524 0,
3525 0};
3526 SVD.push_back(D);
3527 }
3528
3529 // Minimal header size (left redzone) is 4 pointers,
3530 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3531 uint64_t Granularity = 1ULL << Mapping.Scale;
3532 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3533 const ASanStackFrameLayout &L =
3534 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3535
3536 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3537 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3538 for (auto &Desc : SVD)
3539 AllocaToSVDMap[Desc.AI] = &Desc;
3540
3541 // Update SVD with information from lifetime intrinsics.
3542 for (const auto &APC : StaticAllocaPoisonCallVec) {
3543 assert(APC.InsBefore);
3544 assert(APC.AI);
3545 assert(ASan.isInterestingAlloca(*APC.AI));
3546 assert(APC.AI->isStaticAlloca());
3547
3548 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3549 Desc.LifetimeSize = Desc.Size;
3550 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3551 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3552 if (LifetimeLoc->getFile() == FnLoc->getFile())
3553 if (unsigned Line = LifetimeLoc->getLine())
3554 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3555 }
3556 }
3557 }
3558
3559 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3560 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3561 uint64_t LocalStackSize = L.FrameSize;
3562 bool DoStackMalloc =
3563 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3564 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3565 bool DoDynamicAlloca = ClDynamicAllocaStack;
3566 // Don't do dynamic alloca or stack malloc if:
3567 // 1) There is inline asm: too often it makes assumptions on which registers
3568 // are available.
3569 // 2) There is a returns_twice call (typically setjmp), which is
3570 // optimization-hostile, and doesn't play well with introduced indirect
3571 // register-relative calculation of local variable addresses.
3572 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3573 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3574
3575 Value *StaticAlloca =
3576 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3577
3578 Value *FakeStack;
3579 Value *LocalStackBase;
3580 Value *LocalStackBaseAlloca;
3581 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3582
3583 if (DoStackMalloc) {
3584 LocalStackBaseAlloca =
3585 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3586 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3587 // void *FakeStack = __asan_option_detect_stack_use_after_return
3588 // ? __asan_stack_malloc_N(LocalStackSize)
3589 // : nullptr;
3590 // void *LocalStackBase = (FakeStack) ? FakeStack :
3591 // alloca(LocalStackSize);
3592 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3593 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3594 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3595 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3596 Constant::getNullValue(IRB.getInt32Ty()));
3597 Instruction *Term =
3598 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3599 IRBuilder<> IRBIf(Term);
3600 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3601 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3602 Value *FakeStackValue =
3603 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3604 ConstantInt::get(IntptrTy, LocalStackSize));
3605 IRB.SetInsertPoint(InsBefore);
3606 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3607 ConstantInt::get(IntptrTy, 0));
3608 } else {
3609 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3610 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3611 // void *LocalStackBase = (FakeStack) ? FakeStack :
3612 // alloca(LocalStackSize);
3613 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3614 FakeStack =
3615 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3616 ConstantInt::get(IntptrTy, LocalStackSize));
3617 }
3618 Value *NoFakeStack =
3619 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3620 Instruction *Term =
3621 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3622 IRBuilder<> IRBIf(Term);
3623 Value *AllocaValue =
3624 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3625
3626 IRB.SetInsertPoint(InsBefore);
3627 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3628 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3629 DIExprFlags |= DIExpression::DerefBefore;
3630 } else {
3631 // void *FakeStack = nullptr;
3632 // void *LocalStackBase = alloca(LocalStackSize);
3633 FakeStack = ConstantInt::get(IntptrTy, 0);
3634 LocalStackBase =
3635 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3636 LocalStackBaseAlloca = LocalStackBase;
3637 }
3638
3639 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3640 // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3641 // later passes and can result in dropped variable coverage in debug info.
3642 Value *LocalStackBaseAllocaPtr =
3643 isa<PtrToIntInst>(LocalStackBaseAlloca)
3644 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3645 : LocalStackBaseAlloca;
3646 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3647 "Variable descriptions relative to ASan stack base will be dropped");
3648
3649 // Replace Alloca instructions with base+offset.
3650 SmallVector<Value *> NewAllocaPtrs;
3651 for (const auto &Desc : SVD) {
3652 AllocaInst *AI = Desc.AI;
3653 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3654 Desc.Offset);
3655 Value *NewAllocaPtr = IRB.CreateIntToPtr(
3656 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3657 AI->getType());
3658 AI->replaceAllUsesWith(NewAllocaPtr);
3659 NewAllocaPtrs.push_back(NewAllocaPtr);
3660 }
3661
3662 // The left-most redzone has enough space for at least 4 pointers.
3663 // Write the Magic value to redzone[0].
3664 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3665 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3666 BasePlus0);
3667 // Write the frame description constant to redzone[1].
3668 Value *BasePlus1 = IRB.CreateIntToPtr(
3669 IRB.CreateAdd(LocalStackBase,
3670 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3671 IntptrPtrTy);
3672 GlobalVariable *StackDescriptionGlobal =
3673 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3674 /*AllowMerging*/ true, genName("stack"));
3675 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3676 IRB.CreateStore(Description, BasePlus1);
3677 // Write the PC to redzone[2].
3678 Value *BasePlus2 = IRB.CreateIntToPtr(
3679 IRB.CreateAdd(LocalStackBase,
3680 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3681 IntptrPtrTy);
3682 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3683
3684 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3685
3686 // Poison the stack red zones at the entry.
3687 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3688 // As mask we must use most poisoned case: red zones and after scope.
3689 // As bytes we can use either the same or just red zones only.
3690 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3691
3692 if (!StaticAllocaPoisonCallVec.empty()) {
3693 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3694
3695 // Poison static allocas near lifetime intrinsics.
3696 for (const auto &APC : StaticAllocaPoisonCallVec) {
3697 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3698 assert(Desc.Offset % L.Granularity == 0);
3699 size_t Begin = Desc.Offset / L.Granularity;
3700 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3701
3702 IRBuilder<> IRB(APC.InsBefore);
3703 copyToShadow(ShadowAfterScope,
3704 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3705 IRB, ShadowBase);
3706 }
3707 }
3708
3709 // Remove lifetime markers now that these are no longer allocas.
3710 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3711 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3712 auto *I = cast<Instruction>(U);
3713 if (I->isLifetimeStartOrEnd())
3714 I->eraseFromParent();
3715 }
3716 }
3717
3718 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3719 SmallVector<uint8_t, 64> ShadowAfterReturn;
3720
3721 // (Un)poison the stack before all ret instructions.
3722 for (Instruction *Ret : RetVec) {
3723 IRBuilder<> IRBRet(Ret);
3724 // Mark the current frame as retired.
3725 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3726 BasePlus0);
3727 if (DoStackMalloc) {
3728 assert(StackMallocIdx >= 0);
3729 // if FakeStack != 0 // LocalStackBase == FakeStack
3730 // // In use-after-return mode, poison the whole stack frame.
3731 // if StackMallocIdx <= 4
3732 // // For small sizes inline the whole thing:
3733 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3734 // **SavedFlagPtr(FakeStack) = 0
3735 // else
3736 // __asan_stack_free_N(FakeStack, LocalStackSize)
3737 // else
3738 // <This is not a fake stack; unpoison the redzones>
3739 Value *Cmp =
3740 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3741 Instruction *ThenTerm, *ElseTerm;
3742 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3743
3744 IRBuilder<> IRBPoison(ThenTerm);
3745 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3746 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3747 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3748 kAsanStackUseAfterReturnMagic);
3749 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3750 ShadowBase);
3751 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3752 FakeStack,
3753 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3754 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3755 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3756 IRBPoison.CreateStore(
3757 Constant::getNullValue(IRBPoison.getInt8Ty()),
3758 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3759 } else {
3760 // For larger frames call __asan_stack_free_*.
3761 RTCI.createRuntimeCall(
3762 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3763 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3764 }
3765
3766 IRBuilder<> IRBElse(ElseTerm);
3767 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3768 } else {
3769 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3770 }
3771 }
3772
3773 // We are done. Remove the old unused alloca instructions.
3774 for (auto *AI : AllocaVec)
3775 AI->eraseFromParent();
3776}
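// Illustrative summary of the frame written above: the left redzone doubles
// as a header,
//   word 0: kCurrentStackFrameMagic (rewritten to kRetiredStackFrameMagic
//           before each return)
//   word 1: pointer to the frame description string
//   word 2: PC of the function, for symbolizing use-after-return reports
// followed by each variable at its computed layout offset, separated by
// redzones.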
3777
3778void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3779 IRBuilder<> &IRB, bool DoPoison) {
3780 // For now just insert the call to ASan runtime.
3781 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3782 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3783 RTCI.createRuntimeCall(
3784 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3785 {AddrArg, SizeArg});
3786}
3787
3788// Handling llvm.lifetime intrinsics for a given %alloca:
3789// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3790// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3791// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3792// could be poisoned by previous llvm.lifetime.end instruction, as the
3793// variable may go in and out of scope several times, e.g. in loops).
3794// (3) if we poisoned at least one %alloca in a function,
3795// unpoison the whole stack frame at function exit.
3796void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3797 IRBuilder<> IRB(AI);
3798
3799 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3800 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3801
3802 Value *Zero = Constant::getNullValue(IntptrTy);
3803 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3804 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3805
3806 // Since we need to extend the alloca with additional memory for redzones,
3807 // and OldSize is the number of allocated elements, each of ElementSize
3808 // bytes, compute the allocated memory size in bytes as
3809 // OldSize * ElementSize.
3810 const unsigned ElementSize =
3811 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3812 Value *OldSize =
3813 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3814 ConstantInt::get(IntptrTy, ElementSize));
3815
3816 // PartialSize = OldSize % 32
3817 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3818
3819 // Misalign = kAllocaRzSize - PartialSize;
3820 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3821
3822 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3823 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3824 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3825
3826 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3827 // Alignment is added to locate the left redzone, PartialPadding for a
3828 // possible partial redzone, and kAllocaRzSize for the right redzone.
3829 Value *AdditionalChunkSize = IRB.CreateAdd(
3830 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3831 PartialPadding);
3832
3833 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3834
3835 // Insert new alloca with new NewSize and Alignment params.
3836 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3837 NewAlloca->setAlignment(Alignment);
3838
3839 // NewAddress = Address + Alignment
3840 Value *NewAddress =
3841 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3842 ConstantInt::get(IntptrTy, Alignment.value()));
3843
3844 // Insert an __asan_alloca_poison call for the newly created alloca.
3845 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3846
3847 // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3848 // to unpoison the dynamic allocas at function exit.
3849 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3850
3851 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3852
3853 // Remove lifetime markers now that this is no longer an alloca.
3854 for (User *U : make_early_inc_range(AI->users())) {
3855 auto *I = cast<Instruction>(U);
3856 if (I->isLifetimeStartOrEnd())
3857 I->eraseFromParent();
3858 }
3859
3860 // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3861 AI->replaceAllUsesWith(NewAddressPtr);
3862
3863 // We are done. Erase old alloca from parent.
3864 AI->eraseFromParent();
3865}
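// Worked example (illustrative): with kAllocaRzSize = 32 and an alloca of
// 37 bytes at 32-byte alignment: PartialSize = 37 & 31 = 5, Misalign = 27,
// PartialPadding = 27, so NewSize = 37 + (32 + 32 + 27) = 128 bytes: a
// 32-byte left redzone, the user chunk at NewAlloca + 32, and 27 + 32 bytes
// of partial and right redzone after it.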
3866
3867// isSafeAccess returns true if Addr is always inbounds with respect to its
3868// base object. For example, it is a field access or an array access with
3869// constant inbounds index.
3870bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3871 Value *Addr, TypeSize TypeStoreSize) const {
3872 if (TypeStoreSize.isScalable())
3873 // TODO: We can use vscale_range to convert a scalable value to an
3874 // upper bound on the access size.
3875 return false;
3876
3877 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3878 if (!SizeOffset.bothKnown())
3879 return false;
3880
3881 uint64_t Size = SizeOffset.Size.getZExtValue();
3882 int64_t Offset = SizeOffset.Offset.getSExtValue();
3883
3884 // Three checks are required to ensure safety:
3885 // . Offset >= 0 (since the offset is given from the base ptr)
3886 // . Size >= Offset (unsigned)
3887 // . Size - Offset >= NeededSize (unsigned)
3888 return Offset >= 0 && Size >= uint64_t(Offset) &&
3889 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3890}
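// Worked example (illustrative): for a store to the second i32 field of a
// 16-byte struct, ObjSizeVis yields Size = 16, Offset = 4; with
// TypeStoreSize = 32 bits the checks 4 >= 0, 16 >= 4, and 16 - 4 >= 4 all
// hold, so the access is provably in bounds and needs no shadow check.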
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
#define G(x, y, z)
Definition MD5.cpp:56
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:273
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCannotMerge()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition Comdat.h:38
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1274
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:50
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition Function.cpp:380
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:598
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:214
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:275
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
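For example (a sketch; the escape byte is octal \01):
  StringRef Raw = "\01_Z3foov";
  StringRef Clean = GlobalValue::dropLLVMManglingEscape(Raw); // yields "_Z3foov"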
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition Globals.cpp:553
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1833
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:547
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1867
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:687
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2254
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2360
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2202
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1513
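CreateLShr and CreateAdd together form the classic shadow-address computation, Shadow = (Mem >> Scale) + Offset. A minimal sketch with assumed names (IRB, Addr, IntptrTy, MappingScale, MappingOffset are illustrative, not this file's exact variables):
  Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
  Value *Shadow = IRB.CreateLShr(AddrLong, MappingScale);
  Shadow = IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, MappingOffset));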
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2039
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:567
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2336
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1926
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2497
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1808
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2332
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition IRBuilder.h:533
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition IRBuilder.h:1850
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1863
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2197
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition IRBuilder.h:2654
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2511
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2280
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:600
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1886
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:552
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2212
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2783
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
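A generic sketch of the API; the empty asm string and constraints are illustrative only, not what this pass emits (Ctx and IRB are assumed to be in scope):
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
  InlineAsm *IA = InlineAsm::get(FTy, /*AsmString=*/"", /*Constraints=*/"",
                                 /*hasSideEffects=*/true); // acts as a barrier
  IRB.CreateCall(FTy, IA);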
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
Definition MDBuilder.cpp:48
Metadata node.
Definition Metadata.h:1078
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
Tuple of metadata.
Definition Metadata.h:1497
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
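The typical new-pass-manager return pattern built from these factories (a sketch, not this pass's actual body; instrumentModule is a hypothetical helper):
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
    bool Changed = instrumentModule(M);
    return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
  }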
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
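A common visited-set idiom built from insert() and count() (a generic sketch; V is an assumed Value* in scope):
  SmallPtrSet<const Value *, 8> Visited;
  if (Visited.insert(V).second) {
    // First time V is seen; Visited.count(V) now returns 1.
  }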
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:414
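For example, a literal struct type can be built directly from a field list (the fields here are illustrative, not the pass's real metadata layout; M is an assumed Module):
  LLVMContext &Ctx = M.getContext();
  Type *Int64Ty = Type::getInt64Ty(Ctx);
  StructType *STy = StructType::get(Ctx, {Int64Ty, Int64Ty, Int64Ty});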
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
EltTy front() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition Triple.h:909
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition Triple.h:600
bool isOSNetBSD() const
Definition Triple.h:630
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:819
bool isABIN32() const
Definition Triple.h:1134
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition Triple.h:1030
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:411
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition Triple.h:1019
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition Triple.h:1025
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:679
@ UnknownObjectFormat
Definition Triple.h:318
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition Triple.h:914
bool isOSLinux() const
Tests whether the OS is Linux.
Definition Triple.h:728
bool isAMDGPU() const
Definition Triple.h:906
bool isMacOSX() const
Is this a Mac OS X triple.
Definition Triple.h:566
bool isOSFreeBSD() const
Definition Triple.h:638
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition Triple.h:748
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition Triple.h:585
bool isiOS() const
Is this an iOS triple.
Definition Triple.h:575
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition Triple.h:816
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition Triple.h:1118
bool isOSFuchsia() const
Definition Triple.h:642
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition Triple.h:669
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:503
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
LLVMContext & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
@ Done
Definition Threading.h:60
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan's stack use-after-return detection.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators into it.
Definition STLExtras.h:634
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:186
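For a power of two this is exactly log2, which is how a shadow granularity maps to a mapping scale. For example:
  int Scale = llvm::countr_zero(8u); // 8 == 1 << 3, so Scale == 3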
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
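A hedged sketch of the usual pairing with appendToGlobalCtors; the function names follow the ASan runtime convention, and the priority value is illustrative:
  Function *Ctor;
  FunctionCallee InitFn;
  std::tie(Ctor, InitFn) = createSanitizerCtorAndInitFunctions(
      M, "asan.module_ctor", "__asan_init", /*InitArgTypes=*/{}, /*InitArgs=*/{});
  appendToGlobalCtors(M, Ctor, /*Priority=*/1);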
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classified by the 'C' locale.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and HWAddressSanitizer.
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
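This is the backbone of an inline sanitizer check: branch to a cold block when a predicate fires. A minimal sketch, assuming IRB, Ctx, and the loaded shadow byte ShadowValue are in scope:
  Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
  Instruction *CrashTerm = SplitBlockAndInsertIfThen(
      Cmp, IRB.GetInsertPoint(), /*Unreachable=*/true,
      MDBuilder(Ctx).createUnlikelyBranchWeights());
  IRB.SetInsertPoint(CrashTerm); // emit the report call here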
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition Local.cpp:3861
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
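A call following the documented signature (argument values illustrative; TargetTriple is an assumed Triple in scope):
  uint64_t ShadowBase;
  int MappingScale;
  bool OrShadowOffset;
  getAddressSanitizerParams(TargetTriple, /*LongSize=*/64, /*IsKasan=*/false,
                            &ShadowBase, &MappingScale, &OrShadowOffset);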
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition Demangle.cpp:20
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1942
const uint8_t AccessSizeIndex
LLVM_ABI ASanAccessInfo(int32_t Packed)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.