//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = kDynamicShadowSentinel;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kWebAssemblyShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
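// For example, a 16-byte access has size index log2(16) = 4, which selects
// the size-specific report callbacks such as __asan_report_load16.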

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
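// Worked example: a 4-byte userspace write (IsWrite = 1, CompileKernel = 0,
// AccessSizeIndex = 2) packs to (1 << 5) | (2 << 1) | 0 = 0x24.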

// Command-line flags.

197 "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
198 cl::Hidden, cl::init(false));
199
201 "asan-recover",
202 cl::desc("Enable recovery mode (continue-after-error)."),
203 cl::Hidden, cl::init(false));
204
206 "asan-guard-against-version-mismatch",
207 cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
208 cl::init(true));
209
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

235 "asan-always-slow-path",
236 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
237 cl::init(false));
238
240 "asan-force-dynamic-shadow",
241 cl::desc("Load shadow address into a local variable for each function"),
242 cl::Hidden, cl::init(false));
243
static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

251 "asan-with-ifunc-suppress-remat",
252 cl::desc("Suppress rematerialization of dynamic shadow address by passing "
253 "it through inline asm in prologue."),
254 cl::Hidden, cl::init(true));
255
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

275 "asan-use-after-return",
276 cl::desc("Sets the mode of detection for stack-use-after-return."),
278 clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
279 "Never detect stack use after return."),
281 AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
282 "Detect stack use after return if "
283 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284 clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
285 "Always detect stack use after return.")),
286 cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
287
static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"),
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

308 "asan-detect-invalid-pointer-pair",
309 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
310 cl::init(false));
311
313 "asan-detect-invalid-pointer-cmp",
314 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
315 cl::init(false));
316
318 "asan-detect-invalid-pointer-sub",
319 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
320 cl::init(false));
321
323 "asan-realign-stack",
324 cl::desc("Realign stack to the value of this flag (power of two)"),
325 cl::Hidden, cl::init(32));
326
328 "asan-instrumentation-with-call-threshold",
329 cl::desc("If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
332 cl::Hidden, cl::init(7000));
333
335 "asan-memory-access-callback-prefix",
336 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
337 cl::init("__asan_"));
338
340 "asan-kernel-mem-intrinsic-prefix",
341 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
342 cl::init(false));
343
static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

350 "asan-skip-promotable-allocas",
351 cl::desc("Do not instrument promotable allocas"), cl::Hidden,
352 cl::init(true));
353
355 "asan-constructor-kind",
356 cl::desc("Sets the ASan constructor kind"),
357 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
358 clEnumValN(AsanCtorKind::Global, "global",
359 "Use global constructors")),
360 cl::init(AsanCtorKind::Global), cl::Hidden);
361// These flags allow to change the shadow mapping.
362// The shadow mapping looks like
363// Shadow = (Mem >> scale) + offset
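// For example, with the default 64-bit mapping (scale 3, offset 1ULL << 44),
// the shadow byte for address 0x555500000000 lives at
// (0x555500000000 >> 3) + 0x100000000000 = 0x1aaaa0000000.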

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

385 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
386 cl::Hidden, cl::init(true));
387
388static cl::opt<bool> ClOptGlobals("asan-opt-globals",
389 cl::desc("Don't instrument scalar globals"),
390 cl::Hidden, cl::init(true));
391
393 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
394 cl::Hidden, cl::init(false));
395
397 "asan-stack-dynamic-alloca",
398 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
399 cl::init(true));
400
402 "asan-force-experiment",
403 cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
404 cl::init(0));
405
static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

430 "asan-destructor-kind",
431 cl::desc("Sets the ASan destructor kind. The default is to use the value "
432 "provided to the pass constructor"),
433 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
434 clEnumValN(AsanDtorKind::Global, "global",
435 "Use global destructors")),
436 cl::init(AsanDtorKind::Invalid), cl::Hidden);
437
// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.isABIN32();
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsAMDGPU = TargetTriple.isAMDGPU();
  bool IsHaiku = TargetTriple.isOSHaiku();
  bool IsWasm = TargetTriple.isWasm();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsWasm)
      Mapping.Offset = kWebAssemblyShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsHaiku && IsX86_64)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) if the
  // offset is a power of two, but on ppc64 and loongarch64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space.
  // On SystemZ, we could OR the constant in a single instruction, but it's
  // more efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}
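
// Worked example: on Linux x86-64 with the default scale of 3, the
// (IsLinux && IsX86_64) branch above computes
//   Mapping.Offset = 0x7FFFFFFF & (~0xFFFULL << 3) = 0x7fff8000,
// the classic userspace shadow offset.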

namespace llvm {
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem) {
  // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
  //
  // This is not only true for sanitized functions, because AttrInfer can
  // infer those attributes on libc functions, which is not true if those
  // are instrumented (Android) or intercepted.
  //
  // We might want to model ASan shadow memory more opaquely to get rid of
  // this problem altogether, by hiding the shadow memory write in an
  // intrinsic, essentially like in the AArch64StackTagging pass. But that's
  // for another day.

  // The API is weird. `onlyReadsMemory` actually means "does not write", and
  // `onlyWritesMemory` actually means "does not read". So we reconstruct
  // "accesses memory" && "does not read" <=> "writes".
  bool Changed = false;
  if (!F.doesNotAccessMemory()) {
    bool WritesMemory = !F.onlyReadsMemory();
    bool ReadsMemory = !F.onlyWritesMemory();
    if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
      F.removeFnAttr(Attribute::Memory);
      Changed = true;
    }
  }
  if (ReadsArgMem) {
    for (Argument &A : F.args()) {
      if (A.hasAttribute(Attribute::WriteOnly)) {
        A.removeAttr(Attribute::WriteOnly);
        Changed = true;
      }
    }
  }
  if (Changed) {
    // nobuiltin makes sure later passes don't restore assumptions about
    // the function.
    F.addFnAttr(Attribute::NoBuiltin);
  }
}

ASanAccessInfo::ASanAccessInfo(int32_t Packed)
    : Packed(Packed),
      AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
      IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
      CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}

ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               uint8_t AccessSizeIndex)
    : Packed((IsWrite << kIsWriteShift) +
             (CompileKernel << kCompileKernelShift) +
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}

} // namespace llvm

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
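// For example, the default scale of 3 yields max(32, 1 << 3) = 32 bytes,
// while scale 7 yields max(32, 1 << 7) = 128 bytes.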

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

static Twine genName(StringRef suffix) {
  return Twine(kAsanGenPrefix) + suffix;
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");
714
716 for (CallInst *CI : InsertedCalls) {
717 BasicBlock *BB = CI->getParent();
718 assert(BB && "Instruction doesn't belong to a BasicBlock");
719 assert(BB->getParent() == OwnerFn &&
720 "Instruction doesn't belong to the expected Function!");
721
722 ColorVector &Colors = BlockColors[BB];
723 // funclet opbundles are only valid in monochromatic BBs.
724 // Note that unreachable BBs are seen as colorless by colorEHFunclets()
725 // and will be DCE'ed later.
726 if (Colors.empty())
727 continue;
728 if (Colors.size() != 1) {
729 OwnerFn->getContext().emitError(
730 "Instruction's BasicBlock is not monochromatic");
731 continue;
732 }
733
734 BasicBlock *Color = Colors.front();
735 BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();
736
737 if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
738 // Replace CI with a clone with an added funclet OperandBundle
        OperandBundleDef OB("funclet", &*EHPadIt);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = M.getTargetTriple();

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  Module &M;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and
  // log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = M.getTargetTriple();
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule();

private:
  void initializeCallbacks();

  void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor();

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit);
  void createInitializerPoisonCalls();
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion() const;
  GlobalVariable *getOrCreateModuleName();

  Module &M;
  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
  GlobalVariable *ModuleName = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This, however, does not work inside the
// actual function which catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Value *DynamicAreaOffset = IRB.CreateIntrinsic(
          Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace all
  // its uses with the new address, so that
  //     addr = alloca type, old_size, align
  // is replaced by
  //     new_size = (old_size + additional_size) * sizeof(type)
  //     tmp = alloca i8, new_size, max(align, 32)
  //     addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
    // We're interested only in allocas we can handle.
    if (!AI || !ASan.isInterestingAlloca(*AI))
      return;

    std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
    // Check that size is known and can be stored in IntptrTy.
    // TODO: Add support for scalable vectors if possible.
    if (!Size || Size->isScalable() ||
        !ConstantInt::isValueValidForType(IntptrTy, Size->getFixedValue()))
      return;

    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.UseAfterScope)
    OS << "use-after-scope";
  OS << '>';
}

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if nosanitize_address module flag is present for the module.
  // This implies that asan pass has already run before.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    if (F.empty())
      continue;
    if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
      continue;
    if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
      continue;
    if (F.getName().starts_with("__asan_"))
      continue;
    if (F.isPresplitCoroutine())
      continue;
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
  }
  Modified |= ModuleSanitizer.instrumentModule();
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
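// For example, a 4-byte access (TypeSize == 32 bits) yields
// countr_zero(32 / 8) = index 2, and a 16-byte access yields index 4.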

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
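// For example, with scale 3 and offset 0x7fff8000 (Linux x86-64), an access
// at address A is checked through the shadow byte at (A >> 3) + 0x7fff8000;
// each shadow byte covers one 8-byte granule of application memory.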

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);

  if (!Inserted)
    return It->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  It->second = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride
        // is a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

1588// This is a rough heuristic; it may cause both false positives and
1589// false negatives. The proper implementation requires cooperation with
1590// the frontend.
1591static bool isInterestingPointerComparison(Instruction *I) {
1592  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1593 if (!Cmp->isRelational())
1594 return false;
1595 } else {
1596 return false;
1597 }
1598 return isPointerOperand(I->getOperand(0)) &&
1599 isPointerOperand(I->getOperand(1));
1600}
1601
1602// This is a rough heuristic; it may cause both false positives and
1603// false negatives. The proper implementation requires cooperation with
1604// the frontend.
1605static bool isInterestingPointerSubtraction(Instruction *I) {
1606  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1607 if (BO->getOpcode() != Instruction::Sub)
1608 return false;
1609 } else {
1610 return false;
1611 }
1612 return isPointerOperand(I->getOperand(0)) &&
1613 isPointerOperand(I->getOperand(1));
1614}
1615
1616bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1617  // If a global variable does not have dynamic initialization we don't
1618  // have to instrument it. However, if a global does not have an initializer
1619  // at all, we assume it has a dynamic initializer (in another TU).
1620 if (!G->hasInitializer())
1621 return false;
1622
1623 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1624 return false;
1625
1626 return true;
1627}
1628
1629void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1630 Instruction *I, RuntimeCallInserter &RTCI) {
1631 IRBuilder<> IRB(I);
1632 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1633 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1634 for (Value *&i : Param) {
1635 if (i->getType()->isPointerTy())
1636 i = IRB.CreatePointerCast(i, IntptrTy);
1637 }
1638 RTCI.createRuntimeCall(IRB, F, Param);
1639}
1640
1641static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1642 Instruction *InsertBefore, Value *Addr,
1643 MaybeAlign Alignment, unsigned Granularity,
1644 TypeSize TypeStoreSize, bool IsWrite,
1645 Value *SizeArgument, bool UseCalls,
1646 uint32_t Exp, RuntimeCallInserter &RTCI) {
1647  // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1648  // if the data is properly aligned.
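  // For example, with the default 8-byte shadow granularity a 4-byte access
  // aligned to 4 cannot straddle two shadow granules, so a single shadow-byte
  // check is enough.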
1649 if (!TypeStoreSize.isScalable()) {
1650 const auto FixedSize = TypeStoreSize.getFixedValue();
1651 switch (FixedSize) {
1652 case 8:
1653 case 16:
1654 case 32:
1655 case 64:
1656 case 128:
1657 if (!Alignment || *Alignment >= Granularity ||
1658 *Alignment >= FixedSize / 8)
1659 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1660 FixedSize, IsWrite, nullptr, UseCalls,
1661 Exp, RTCI);
1662 }
1663 }
1664 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1665 IsWrite, nullptr, UseCalls, Exp, RTCI);
1666}
1667
1668void AddressSanitizer::instrumentMaskedLoadOrStore(
1669 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1670 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1671 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1672 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1673 RuntimeCallInserter &RTCI) {
1674 auto *VTy = cast<VectorType>(OpType);
1675 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1676 auto Zero = ConstantInt::get(IntptrTy, 0);
1677
1678 IRBuilder IB(I);
1679 Instruction *LoopInsertBefore = I;
1680 if (EVL) {
1681    // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1682    // greater than zero, so we must check whether EVL is zero here.
1683    Type *EVLType = EVL->getType();
1684    Value *EVLIsNonZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1685    LoopInsertBefore = SplitBlockAndInsertIfThen(EVLIsNonZero, I, false);
1686 IB.SetInsertPoint(LoopInsertBefore);
1687 // Cast EVL to IntptrTy.
1688 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1689    // To avoid undefined behavior when extracting with an out-of-range index,
1690    // use the minimum of EVL and the element count as the trip count.
1691 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1692 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1693 } else {
1694 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1695 }
1696
1697 // Cast Stride to IntptrTy.
1698 if (Stride)
1699 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1700
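  // Expand the masked operation into a per-lane loop over [0, EVL): lanes with
  // a known-zero mask bit are skipped, lanes with a known-one bit are checked
  // unconditionally, and all other lanes are checked under a branch on the
  // extracted mask element.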
1701 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
1702 [&](IRBuilderBase &IRB, Value *Index) {
1703 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1704 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1705 if (MaskElemC->isZero())
1706 // No check
1707 return;
1708 // Unconditional check
1709 } else {
1710 // Conditional check
1711 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1712 MaskElem, &*IRB.GetInsertPoint(), false);
1713 IRB.SetInsertPoint(ThenTerm);
1714 }
1715
1716 Value *InstrumentedAddress;
1717 if (isa<VectorType>(Addr->getType())) {
1718 assert(
1719 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1720 "Expected vector of pointer.");
1721 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1722 } else if (Stride) {
1723 Index = IRB.CreateMul(Index, Stride);
1724 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1725 } else {
1726 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1727 }
1728 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1729 Alignment, Granularity, ElemTypeSize, IsWrite,
1730 SizeArgument, UseCalls, Exp, RTCI);
1731 });
1732}
1733
1734void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1735 InterestingMemoryOperand &O, bool UseCalls,
1736 const DataLayout &DL,
1737 RuntimeCallInserter &RTCI) {
1738 Value *Addr = O.getPtr();
1739
1740 // Optimization experiments.
1741 // The experiments can be used to evaluate potential optimizations that remove
1742 // instrumentation (assess false negatives). Instead of completely removing
1743 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1744 // experiments that want to remove instrumentation of this instruction).
1745  // If Exp is non-zero, this pass will emit special calls into the runtime
1746  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1747  // make the runtime terminate the program in a special way (with a different
1748 // exit status). Then you run the new compiler on a buggy corpus, collect
1749 // the special terminations (ideally, you don't see them at all -- no false
1750 // negatives) and make the decision on the optimization.
1751  uint32_t Exp = ClForceExperiment;
1752
1753 if (ClOpt && ClOptGlobals) {
1754 // If initialization order checking is disabled, a simple access to a
1755 // dynamically initialized global is always valid.
1756 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1757 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1758 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1759 NumOptimizedAccessesToGlobalVar++;
1760 return;
1761 }
1762 }
1763
1764 if (ClOpt && ClOptStack) {
1765 // A direct inbounds access to a stack variable is always valid.
1766 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1767 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1768 NumOptimizedAccessesToStackVar++;
1769 return;
1770 }
1771 }
1772
1773 if (O.IsWrite)
1774 NumInstrumentedWrites++;
1775 else
1776 NumInstrumentedReads++;
1777
1778 unsigned Granularity = 1 << Mapping.Scale;
1779 if (O.MaybeMask) {
1780 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1781 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1782 Granularity, O.OpType, O.IsWrite, nullptr,
1783 UseCalls, Exp, RTCI);
1784 } else {
1785 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1786 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1787 UseCalls, Exp, RTCI);
1788 }
1789}
1790
1791Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1792 Value *Addr, bool IsWrite,
1793 size_t AccessSizeIndex,
1794 Value *SizeArgument,
1795 uint32_t Exp,
1796 RuntimeCallInserter &RTCI) {
1797 InstrumentationIRBuilder IRB(InsertBefore);
1798 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1799 CallInst *Call = nullptr;
1800 if (SizeArgument) {
1801 if (Exp == 0)
1802 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1803 {Addr, SizeArgument});
1804 else
1805 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1806 {Addr, SizeArgument, ExpVal});
1807 } else {
1808 if (Exp == 0)
1809 Call = RTCI.createRuntimeCall(
1810 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1811 else
1812 Call = RTCI.createRuntimeCall(
1813 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1814 }
1815
1816 Call->setCannotMerge();
1817 return Call;
1818}
1819
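// Build the slow-path comparison refining a nonzero shadow byte: the access is
// bad only if its last byte reaches past the addressable prefix of the granule.
// For instance, with 8-byte granularity and shadow value 5, a 2-byte access at
// Addr % 8 == 4 computes 4 + 2 - 1 == 5 >= 5 and is reported, while the same
// access at Addr % 8 == 3 computes 4 < 5 and passes.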
1820Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1821 Value *ShadowValue,
1822 uint32_t TypeStoreSize) {
1823 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1824 // Addr & (Granularity - 1)
1825 Value *LastAccessedByte =
1826 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1827 // (Addr & (Granularity - 1)) + size - 1
1828 if (TypeStoreSize / 8 > 1)
1829 LastAccessedByte = IRB.CreateAdd(
1830 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1831 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1832 LastAccessedByte =
1833 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1834 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1835 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1836}
1837
1838Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1839 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1840 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1841  // Do not instrument unsupported addrspaces.
1842  if (isUnsupportedAMDGPUAddrspace(Addr))
1843    return nullptr;
1844 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1845 // Follow host instrumentation for global and constant addresses.
1846 if (PtrTy->getPointerAddressSpace() != 0)
1847 return InsertBefore;
1848  // Instrument generic addresses in supported address spaces.
1849 IRBuilder<> IRB(InsertBefore);
1850 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1851 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1852 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1853 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1854 Value *AddrSpaceZeroLanding =
1855 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1856 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1857 return InsertBefore;
1858}
1859
1860Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1861 Value *Cond, bool Recover) {
1862 Module &M = *IRB.GetInsertBlock()->getModule();
1863 Value *ReportCond = Cond;
1864 if (!Recover) {
1865 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1866 IRB.getInt1Ty());
1867 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1868 }
1869
1870 auto *Trm =
1871 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1872                                MDBuilder(*C).createUnlikelyBranchWeights());
1873  Trm->getParent()->setName("asan.report");
1874
1875 if (Recover)
1876 return Trm;
1877
1878 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1879 IRB.SetInsertPoint(Trm);
1880 return IRB.CreateCall(
1881 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1882}
1883
1884void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1885 Instruction *InsertBefore, Value *Addr,
1886 MaybeAlign Alignment,
1887 uint32_t TypeStoreSize, bool IsWrite,
1888 Value *SizeArgument, bool UseCalls,
1889 uint32_t Exp,
1890 RuntimeCallInserter &RTCI) {
1891 if (TargetTriple.isAMDGPU()) {
1892 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1893 TypeStoreSize, IsWrite, SizeArgument);
1894 if (!InsertBefore)
1895 return;
1896 }
1897
1898 InstrumentationIRBuilder IRB(InsertBefore);
1899 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1900
1901 if (UseCalls && ClOptimizeCallbacks) {
1902 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1903 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1904 {IRB.CreatePointerCast(Addr, PtrTy),
1905 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1906 return;
1907 }
1908
1909 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1910 if (UseCalls) {
1911 if (Exp == 0)
1912 RTCI.createRuntimeCall(
1913 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1914 else
1915 RTCI.createRuntimeCall(
1916 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1917 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1918 return;
1919 }
1920
1921 Type *ShadowTy =
1922 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1923 Type *ShadowPtrTy = PointerType::get(*C, 0);
1924 Value *ShadowPtr = memToShadow(AddrLong, IRB);
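  // memToShadow applies the usual ASan mapping, roughly
  // (AddrLong >> Mapping.Scale) + Mapping.Offset. Each shadow byte describes
  // one granule: 0 means fully addressable, k in [1, Granularity) means only
  // the first k bytes are addressable, and negative values encode poisoned
  // memory.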
1925 const uint64_t ShadowAlign =
1926 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1927 Value *ShadowValue = IRB.CreateAlignedLoad(
1928 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1929
1930 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1931 size_t Granularity = 1ULL << Mapping.Scale;
1932 Instruction *CrashTerm = nullptr;
1933
1934 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1935
1936 if (TargetTriple.isAMDGCN()) {
1937 if (GenSlowPath) {
1938 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1939 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1940 }
1941 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1942 } else if (GenSlowPath) {
1943 // We use branch weights for the slow path check, to indicate that the slow
1944 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1945    Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1946        Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1947 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1948 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1949 IRB.SetInsertPoint(CheckTerm);
1950 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1951 if (Recover) {
1952 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1953 } else {
1954 BasicBlock *CrashBlock =
1955 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1956 CrashTerm = new UnreachableInst(*C, CrashBlock);
1957 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1958 ReplaceInstWithInst(CheckTerm, NewTerm);
1959 }
1960 } else {
1961 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1962 }
1963
1964 Instruction *Crash = generateCrashCode(
1965 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1966 if (OrigIns->getDebugLoc())
1967 Crash->setDebugLoc(OrigIns->getDebugLoc());
1968}
1969
1970// Instrument unusual size or unusual alignment.
1971// We cannot do it with a single check, so we do a 1-byte check for the first
1972// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1973// to report the actual access size.
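// For instance, a 10-byte access is covered by a 1-byte shadow check at Addr
// and another at Addr + 9, with the real size (10) passed along so the report
// can print it.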
1974void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1975 Instruction *I, Instruction *InsertBefore, Value *Addr,
1976 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
1977 uint32_t Exp, RuntimeCallInserter &RTCI) {
1978 InstrumentationIRBuilder IRB(InsertBefore);
1979 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
1980 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
1981
1982 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1983 if (UseCalls) {
1984 if (Exp == 0)
1985 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1986 {AddrLong, Size});
1987 else
1988 RTCI.createRuntimeCall(
1989 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
1990 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1991 } else {
1992 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
1993 Value *LastByte = IRB.CreateIntToPtr(
1994 IRB.CreateAdd(AddrLong, SizeMinusOne),
1995 Addr->getType());
1996 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
1997 RTCI);
1998 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
1999 Exp, RTCI);
2000 }
2001}
2002
2003void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
2004 // Set up the arguments to our poison/unpoison functions.
2005 IRBuilder<> IRB(&GlobalInit.front(),
2006 GlobalInit.front().getFirstInsertionPt());
2007
2008 // Add a call to poison all external globals before the given function starts.
2009 Value *ModuleNameAddr =
2010 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
2011 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2012
2013 // Add calls to unpoison all globals before each return instruction.
2014 for (auto &BB : GlobalInit)
2015 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
2016 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
2017}
2018
2019void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2020 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2021 if (!GV)
2022 return;
2023
2024 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
2025 if (!CA)
2026 return;
2027
2028 for (Use &OP : CA->operands()) {
2029 if (isa<ConstantAggregateZero>(OP)) continue;
2030 ConstantStruct *CS = cast<ConstantStruct>(OP);
2031
2032 // Must have a function or null ptr.
2033 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2034 if (F->getName() == kAsanModuleCtorName) continue;
2035 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2036 // Don't instrument CTORs that will run before asan.module_ctor.
2037 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2038 continue;
2039 poisonOneInitializer(*F);
2040 }
2041 }
2042}
2043
2044const GlobalVariable *
2045ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2046 // In case this function should be expanded to include rules that do not just
2047 // apply when CompileKernel is true, either guard all existing rules with an
2048 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2049 // should also apply to user space.
2050 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2051
2052 const Constant *C = GA.getAliasee();
2053
2054 // When compiling the kernel, globals that are aliased by symbols prefixed
2055 // by "__" are special and cannot be padded with a redzone.
2056 if (GA.getName().starts_with("__"))
2057 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2058
2059 return nullptr;
2060}
2061
2062bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2063 Type *Ty = G->getValueType();
2064 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2065
2066 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2067 return false;
2068 if (!Ty->isSized()) return false;
2069 if (!G->hasInitializer()) return false;
2070 // Globals in address space 1 and 4 are supported for AMDGPU.
2071 if (G->getAddressSpace() &&
2072 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2073 return false;
2074 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2075 // Two problems with thread-locals:
2076 // - The address of the main thread's copy can't be computed at link-time.
2077 // - Need to poison all copies, not just the main thread's one.
2078 if (G->isThreadLocal()) return false;
2079 // For now, just ignore this Global if the alignment is large.
2080 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2081
2082 // For non-COFF targets, only instrument globals known to be defined by this
2083 // TU.
2084 // FIXME: We can instrument comdat globals on ELF if we are using the
2085 // GC-friendly metadata scheme.
2086 if (!TargetTriple.isOSBinFormatCOFF()) {
2087 if (!G->hasExactDefinition() || G->hasComdat())
2088 return false;
2089 } else {
2090 // On COFF, don't instrument non-ODR linkages.
2091 if (G->isInterposable())
2092 return false;
2093 // If the global has AvailableExternally linkage, then it is not in this
2094 // module, which means it does not need to be instrumented.
2095 if (G->hasAvailableExternallyLinkage())
2096 return false;
2097 }
2098
2099 // If a comdat is present, it must have a selection kind that implies ODR
2100 // semantics: no duplicates, any, or exact match.
2101 if (Comdat *C = G->getComdat()) {
2102 switch (C->getSelectionKind()) {
2103 case Comdat::Any:
2104 case Comdat::ExactMatch:
2105    case Comdat::NoDeduplicate:
2106      break;
2107 case Comdat::Largest:
2108 case Comdat::SameSize:
2109 return false;
2110 }
2111 }
2112
2113 if (G->hasSection()) {
2114 // The kernel uses explicit sections for mostly special global variables
2115 // that we should not instrument. E.g. the kernel may rely on their layout
2116 // without redzones, or remove them at link time ("discard.*"), etc.
2117 if (CompileKernel)
2118 return false;
2119
2120 StringRef Section = G->getSection();
2121
2122    // Globals from llvm.metadata aren't emitted; do not instrument them.
2123 if (Section == "llvm.metadata") return false;
2124 // Do not instrument globals from special LLVM sections.
2125 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2126 return false;
2127
2128 // Do not instrument function pointers to initialization and termination
2129 // routines: dynamic linker will not properly handle redzones.
2130 if (Section.starts_with(".preinit_array") ||
2131 Section.starts_with(".init_array") ||
2132 Section.starts_with(".fini_array")) {
2133 return false;
2134 }
2135
2136    // Do not instrument user-defined sections (with names resembling
2137    // valid C identifiers).
2138 if (TargetTriple.isOSBinFormatELF()) {
2139 if (llvm::all_of(Section,
2140 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2141 return false;
2142 }
2143
2144 // On COFF, if the section name contains '$', it is highly likely that the
2145 // user is using section sorting to create an array of globals similar to
2146 // the way initialization callbacks are registered in .init_array and
2147 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2148 // to such globals is counterproductive, because the intent is that they
2149 // will form an array, and out-of-bounds accesses are expected.
2150 // See https://github.com/google/sanitizers/issues/305
2151 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2152 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2153 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2154 << *G << "\n");
2155 return false;
2156 }
2157
2158 if (TargetTriple.isOSBinFormatMachO()) {
2159 StringRef ParsedSegment, ParsedSection;
2160 unsigned TAA = 0, StubSize = 0;
2161 bool TAAParsed;
2162      cantFail(MCSectionMachO::ParseSectionSpecifier(
2163          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2164
2165 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2166 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2167 // them.
2168 if (ParsedSegment == "__OBJC" ||
2169 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2170 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2171 return false;
2172 }
2173 // See https://github.com/google/sanitizers/issues/32
2174 // Constant CFString instances are compiled in the following way:
2175 // -- the string buffer is emitted into
2176 // __TEXT,__cstring,cstring_literals
2177 // -- the constant NSConstantString structure referencing that buffer
2178 // is placed into __DATA,__cfstring
2179 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2180 // Moreover, it causes the linker to crash on OS X 10.7
2181 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2182 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2183 return false;
2184 }
2185 // The linker merges the contents of cstring_literals and removes the
2186 // trailing zeroes.
2187 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2188 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2189 return false;
2190 }
2191 }
2192 }
2193
2194 if (CompileKernel) {
2195    // Globals that are prefixed by "__" are special and cannot be padded
2196    // with a redzone.
2197 if (G->getName().starts_with("__"))
2198 return false;
2199 }
2200
2201 return true;
2202}
2203
2204// On Mach-O platforms, we emit global metadata in a separate section of the
2205// binary in order to allow the linker to properly dead strip. This is only
2206// supported on recent versions of ld64.
2207bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2208 if (!TargetTriple.isOSBinFormatMachO())
2209 return false;
2210
2211 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2212 return true;
2213 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2214 return true;
2215 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2216 return true;
2217 if (TargetTriple.isDriverKit())
2218 return true;
2219 if (TargetTriple.isXROS())
2220 return true;
2221
2222 return false;
2223}
2224
2225StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2226 switch (TargetTriple.getObjectFormat()) {
2227 case Triple::COFF: return ".ASAN$GL";
2228 case Triple::ELF: return "asan_globals";
2229 case Triple::MachO: return "__DATA,__asan_globals,regular";
2230 case Triple::Wasm:
2231 case Triple::GOFF:
2232 case Triple::SPIRV:
2233  case Triple::XCOFF:
2234  case Triple::DXContainer:
2235    report_fatal_error(
2236        "ModuleAddressSanitizer not implemented for object file format");
2237  case Triple::UnknownObjectFormat:
2238    break;
2239 }
2240 llvm_unreachable("unsupported object format");
2241}
2242
2243void ModuleAddressSanitizer::initializeCallbacks() {
2244 IRBuilder<> IRB(*C);
2245
2246 // Declare our poisoning and unpoisoning functions.
2247 AsanPoisonGlobals =
2248 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2249 AsanUnpoisonGlobals =
2250 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2251
2252 // Declare functions that register/unregister globals.
2253 AsanRegisterGlobals = M.getOrInsertFunction(
2254 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2255 AsanUnregisterGlobals = M.getOrInsertFunction(
2256 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2257
2258 // Declare the functions that find globals in a shared object and then invoke
2259 // the (un)register function on them.
2260 AsanRegisterImageGlobals = M.getOrInsertFunction(
2261 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2262 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2263      kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2264
2265 AsanRegisterElfGlobals =
2266 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2267 IntptrTy, IntptrTy, IntptrTy);
2268 AsanUnregisterElfGlobals =
2269 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2270 IntptrTy, IntptrTy, IntptrTy);
2271}
2272
2273// Put the metadata and the instrumented global in the same group. This ensures
2274// that the metadata is discarded if the instrumented global is discarded.
2275void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2276 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2277 Module &M = *G->getParent();
2278 Comdat *C = G->getComdat();
2279 if (!C) {
2280 if (!G->hasName()) {
2281 // If G is unnamed, it must be internal. Give it an artificial name
2282 // so we can put it in a comdat.
2283 assert(G->hasLocalLinkage());
2284 G->setName(genName("anon_global"));
2285 }
2286
2287 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2288 std::string Name = std::string(G->getName());
2289 Name += InternalSuffix;
2290 C = M.getOrInsertComdat(Name);
2291 } else {
2292 C = M.getOrInsertComdat(G->getName());
2293 }
2294
2295 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2296 // linkage to internal linkage so that a symbol table entry is emitted. This
2297 // is necessary in order to create the comdat group.
2298 if (TargetTriple.isOSBinFormatCOFF()) {
2299 C->setSelectionKind(Comdat::NoDeduplicate);
2300 if (G->hasPrivateLinkage())
2301 G->setLinkage(GlobalValue::InternalLinkage);
2302 }
2303 G->setComdat(C);
2304 }
2305
2306 assert(G->hasComdat());
2307 Metadata->setComdat(G->getComdat());
2308}
2309
2310// Create a separate metadata global and put it in the appropriate ASan
2311// global registration section.
2312GlobalVariable *
2313ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2314 StringRef OriginalName) {
2315 auto Linkage = TargetTriple.isOSBinFormatMachO()
2316                     ? GlobalVariable::InternalLinkage
2317                     : GlobalVariable::PrivateLinkage;
2318  GlobalVariable *Metadata = new GlobalVariable(
2319      M, Initializer->getType(), false, Linkage, Initializer,
2320 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2321 Metadata->setSection(getGlobalMetadataSection());
2322 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2323 // relocation pressure.
2324  setGlobalVariableLargeSection(TargetTriple, *Metadata);
2325  return Metadata;
2326}
2327
2328Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2329 AsanDtorFunction = Function::createWithDefaultAttr(
2330      FunctionType::get(Type::getVoidTy(*C), false),
2331      GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2332  AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2333 // Ensure Dtor cannot be discarded, even if in a comdat.
2334 appendToUsed(M, {AsanDtorFunction});
2335 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2336
2337 return ReturnInst::Create(*C, AsanDtorBB);
2338}
2339
2340void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2341 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2342 ArrayRef<Constant *> MetadataInitializers) {
2343 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2344 auto &DL = M.getDataLayout();
2345
2346 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2347 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2348 Constant *Initializer = MetadataInitializers[i];
2349 GlobalVariable *G = ExtendedGlobals[i];
2350 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2351 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2352 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2353 MetadataGlobals[i] = Metadata;
2354
2355 // The MSVC linker always inserts padding when linking incrementally. We
2356 // cope with that by aligning each struct to its size, which must be a power
2357 // of two.
2358 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2359 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2360 "global metadata will not be padded appropriately");
2361 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2362
2363 SetComdatForGlobalMetadata(G, Metadata, "");
2364 }
2365
2366 // Update llvm.compiler.used, adding the new metadata globals. This is
2367 // needed so that during LTO these variables stay alive.
2368 if (!MetadataGlobals.empty())
2369 appendToCompilerUsed(M, MetadataGlobals);
2370}
2371
2372void ModuleAddressSanitizer::instrumentGlobalsELF(
2373 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2374 ArrayRef<Constant *> MetadataInitializers,
2375 const std::string &UniqueModuleId) {
2376 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2377
2378  // Putting globals in a comdat changes the semantics and can potentially
2379  // cause false negative ODR violations at link time. If ODR indicators are
2380  // used, we keep the comdat sections, as link-time ODR violations will be
2381  // detected on the ODR indicator symbols.
2382 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2383
2384 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2385 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2386 GlobalVariable *G = ExtendedGlobals[i];
2387    GlobalVariable *Metadata =
2388        CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2389 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2390 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2391 MetadataGlobals[i] = Metadata;
2392
2393 if (UseComdatForGlobalsGC)
2394 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2395 }
2396
2397 // Update llvm.compiler.used, adding the new metadata globals. This is
2398 // needed so that during LTO these variables stay alive.
2399 if (!MetadataGlobals.empty())
2400 appendToCompilerUsed(M, MetadataGlobals);
2401
2402 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2403 // to look up the loaded image that contains it. Second, we can store in it
2404 // whether registration has already occurred, to prevent duplicate
2405 // registration.
2406 //
2407 // Common linkage ensures that there is only one global per shared library.
2408 GlobalVariable *RegisteredFlag = new GlobalVariable(
2409 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2410 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2411  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2412
2413 // Create start and stop symbols.
2414 GlobalVariable *StartELFMetadata = new GlobalVariable(
2415 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2416 "__start_" + getGlobalMetadataSection());
2417  StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2418  GlobalVariable *StopELFMetadata = new GlobalVariable(
2419 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2420 "__stop_" + getGlobalMetadataSection());
2421  StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2422
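  // For sections whose names are valid C identifiers, the ELF linker
  // synthesizes __start_<section> and __stop_<section> symbols delimiting the
  // section, which lets the runtime walk every metadata entry that landed in
  // asan_globals across this linkage unit.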
2423 // Create a call to register the globals with the runtime.
2424 if (ConstructorKind == AsanCtorKind::Global)
2425 IRB.CreateCall(AsanRegisterElfGlobals,
2426 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2427 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2428 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2429
2430 // We also need to unregister globals at the end, e.g., when a shared library
2431 // gets closed.
2432 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2433 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2434 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2435 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2436 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2437 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2438 }
2439}
2440
2441void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2442 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2443 ArrayRef<Constant *> MetadataInitializers) {
2444 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2445
2446  // On recent Mach-O platforms, use a structure which binds the liveness of
2447  // the global variable to the metadata struct. Keep a list of the "Liveness"
2448  // globals created, to be added to llvm.compiler.used later.
2449 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2450 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2451
2452 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2453 Constant *Initializer = MetadataInitializers[i];
2454 GlobalVariable *G = ExtendedGlobals[i];
2455 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2456
2457 // On recent Mach-O platforms, we emit the global metadata in a way that
2458 // allows the linker to properly strip dead globals.
2459 auto LivenessBinder =
2460 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2461                            Initializer->getAggregateElement(1u));
2462    GlobalVariable *Liveness = new GlobalVariable(
2463 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2464 Twine("__asan_binder_") + G->getName());
2465 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2466 LivenessGlobals[i] = Liveness;
2467 }
2468
2469  // Update llvm.compiler.used, adding the new liveness globals. This is
2470  // needed so that during LTO these variables stay alive. The alternative
2471  // would be to have the linker handle the LTO symbols, but libLTO's
2472  // current API does not expose access to the section for each symbol.
2473 if (!LivenessGlobals.empty())
2474 appendToCompilerUsed(M, LivenessGlobals);
2475
2476 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2477 // to look up the loaded image that contains it. Second, we can store in it
2478 // whether registration has already occurred, to prevent duplicate
2479 // registration.
2480 //
2481  // Common linkage ensures that there is only one global per shared library.
2482 GlobalVariable *RegisteredFlag = new GlobalVariable(
2483 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2484 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2485  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2486
2487 if (ConstructorKind == AsanCtorKind::Global)
2488 IRB.CreateCall(AsanRegisterImageGlobals,
2489 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2490
2491 // We also need to unregister globals at the end, e.g., when a shared library
2492 // gets closed.
2493 if (DestructorKind != AsanDtorKind::None) {
2494 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2495 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2496 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2497 }
2498}
2499
2500void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2501 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2502 ArrayRef<Constant *> MetadataInitializers) {
2503 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2504 unsigned N = ExtendedGlobals.size();
2505 assert(N > 0);
2506
2507 // On platforms that don't have a custom metadata section, we emit an array
2508 // of global metadata structures.
2509 ArrayType *ArrayOfGlobalStructTy =
2510 ArrayType::get(MetadataInitializers[0]->getType(), N);
2511 auto AllGlobals = new GlobalVariable(
2512 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2513 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2514 if (Mapping.Scale > 3)
2515 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2516
2517 if (ConstructorKind == AsanCtorKind::Global)
2518 IRB.CreateCall(AsanRegisterGlobals,
2519 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2520 ConstantInt::get(IntptrTy, N)});
2521
2522 // We also need to unregister globals at the end, e.g., when a shared library
2523 // gets closed.
2524 if (DestructorKind != AsanDtorKind::None) {
2525 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2526 IrbDtor.CreateCall(AsanUnregisterGlobals,
2527 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2528 ConstantInt::get(IntptrTy, N)});
2529 }
2530}
2531
2532// This function replaces all instrumentable global variables with new
2533// variables that have trailing redzones. It also creates a function that
2534// poisons redzones and inserts this function into llvm.global_ctors.
2535// Sets *CtorComdat to true if the global registration code emitted into the
2536// asan constructor is comdat-compatible.
2537void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
2538 bool *CtorComdat) {
2539 // Build set of globals that are aliased by some GA, where
2540 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2541 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2542 if (CompileKernel) {
2543 for (auto &GA : M.aliases()) {
2544 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2545 AliasedGlobalExclusions.insert(GV);
2546 }
2547 }
2548
2549 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2550 for (auto &G : M.globals()) {
2551 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2552 GlobalsToChange.push_back(&G);
2553 }
2554
2555 size_t n = GlobalsToChange.size();
2556 auto &DL = M.getDataLayout();
2557
2558 // A global is described by a structure
2559 // size_t beg;
2560 // size_t size;
2561 // size_t size_with_redzone;
2562 // const char *name;
2563 // const char *module_name;
2564 // size_t has_dynamic_init;
2565 // size_t padding_for_windows_msvc_incremental_link;
2566 // size_t odr_indicator;
2567 // We initialize an array of such structures and pass it to a run-time call.
2568 StructType *GlobalStructTy =
2569 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2570 IntptrTy, IntptrTy, IntptrTy);
2571  SmallVector<GlobalVariable *, 16> NewGlobals(n);
2572  SmallVector<Constant *, 16> Initializers(n);
2573
2574 for (size_t i = 0; i < n; i++) {
2575 GlobalVariable *G = GlobalsToChange[i];
2576
2577    GlobalValue::SanitizerMetadata MD;
2578    if (G->hasSanitizerMetadata())
2579 MD = G->getSanitizerMetadata();
2580
2581 // The runtime library tries demangling symbol names in the descriptor but
2582 // functionality like __cxa_demangle may be unavailable (e.g.
2583 // -static-libstdc++). So we demangle the symbol names here.
2584 std::string NameForGlobal = G->getName().str();
2585    GlobalVariable *Name =
2586        createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2587                                     /*AllowMerging*/ true, genName("global"));
2588
2589 Type *Ty = G->getValueType();
2590 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2591 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2592 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2593
2594 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2595 Constant *NewInitializer = ConstantStruct::get(
2596 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
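    // E.g. an "int g;" with the minimum 32-byte redzone becomes
    // { i32, [28 x i8] }; the payload stays at offset zero, so uses of G can
    // simply be redirected to field 0 of the new global below.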
2597
2598 // Create a new global variable with enough space for a redzone.
2599 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2600 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2601      Linkage = GlobalValue::InternalLinkage;
2602    GlobalVariable *NewGlobal = new GlobalVariable(
2603 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2604 G->getThreadLocalMode(), G->getAddressSpace());
2605 NewGlobal->copyAttributesFrom(G);
2606 NewGlobal->setComdat(G->getComdat());
2607 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2608 // Don't fold globals with redzones. ODR violation detector and redzone
2609 // poisoning implicitly creates a dependence on the global's address, so it
2610 // is no longer valid for it to be marked unnamed_addr.
2611    NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2612
2613 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2614 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2615 G->isConstant()) {
2616 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2617 if (Seq && Seq->isCString())
2618 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2619 }
2620
2621 // Transfer the debug info and type metadata. The payload starts at offset
2622 // zero so we can copy the metadata over as is.
2623 NewGlobal->copyMetadata(G, 0);
2624
2625 Value *Indices2[2];
2626 Indices2[0] = IRB.getInt32(0);
2627 Indices2[1] = IRB.getInt32(0);
2628
2629 G->replaceAllUsesWith(
2630 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2631 NewGlobal->takeName(G);
2632 G->eraseFromParent();
2633 NewGlobals[i] = NewGlobal;
2634
2635 Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
2636 GlobalValue *InstrumentedGlobal = NewGlobal;
2637
2638 bool CanUsePrivateAliases =
2639 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2640 TargetTriple.isOSBinFormatWasm();
2641 if (CanUsePrivateAliases && UsePrivateAlias) {
2642 // Create local alias for NewGlobal to avoid crash on ODR between
2643 // instrumented and non-instrumented libraries.
2644 InstrumentedGlobal =
2645          GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2646    }
2647
2648 // ODR should not happen for local linkage.
2649 if (NewGlobal->hasLocalLinkage()) {
2650 ODRIndicator =
2651 ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
2652 } else if (UseOdrIndicator) {
2653 // With local aliases, we need to provide another externally visible
2654 // symbol __odr_asan_XXX to detect ODR violation.
2655 auto *ODRIndicatorSym =
2656 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2657                             ConstantInt::get(IRB.getInt8Ty(), 0),
2658                             kODRGenPrefix + NameForGlobal, nullptr,
2659 NewGlobal->getThreadLocalMode());
2660
2661 // Set meaningful attributes for indicator symbol.
2662 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2663 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2664 ODRIndicatorSym->setAlignment(Align(1));
2665 ODRIndicator = ODRIndicatorSym;
2666 }
2667
2668 Constant *Initializer = ConstantStruct::get(
2669 GlobalStructTy,
2670 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2671 ConstantInt::get(IntptrTy, SizeInBytes),
2672 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2673        ConstantExpr::getPointerCast(Name, IntptrTy),
2674        ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
2675 ConstantInt::get(IntptrTy, MD.IsDynInit),
2676 Constant::getNullValue(IntptrTy),
2677 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2678
2679 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2680
2681 Initializers[i] = Initializer;
2682 }
2683
2684  // Add instrumented globals to the llvm.compiler.used list to prevent
2685  // LTO's ConstantMerge pass from folding them together.
2686 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2687 for (size_t i = 0; i < n; i++) {
2688 GlobalVariable *G = NewGlobals[i];
2689 if (G->getName().empty()) continue;
2690 GlobalsToAddToUsedList.push_back(G);
2691 }
2692 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2693
2694 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2695 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2696 // linkage unit will only have one module constructor, and (b) the register
2697 // function will be called. The module destructor is not created when n ==
2698 // 0.
2699 *CtorComdat = true;
2700 instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
2701 } else if (n == 0) {
2702 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2703 // all compile units will have identical module constructor/destructor.
2704 *CtorComdat = TargetTriple.isOSBinFormatELF();
2705 } else {
2706 *CtorComdat = false;
2707 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2708 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2709 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2710 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2711 } else {
2712 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2713 }
2714 }
2715
2716 // Create calls for poisoning before initializers run and unpoisoning after.
2717 if (ClInitializers)
2718 createInitializerPoisonCalls();
2719
2720 LLVM_DEBUG(dbgs() << M);
2721}
2722
2723uint64_t
2724ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2725 constexpr uint64_t kMaxRZ = 1 << 18;
2726 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2727
2728 uint64_t RZ = 0;
2729 if (SizeInBytes <= MinRZ / 2) {
2730    // Reduce the redzone size for small objects, e.g. int, char[1]. MinRZ is
2731    // at least 32 bytes, so we optimize when SizeInBytes is less than or
2732    // equal to half of MinRZ.
2733 RZ = MinRZ - SizeInBytes;
2734 } else {
2735 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2736 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2737
2738 // Round up to multiple of MinRZ.
2739 if (SizeInBytes % MinRZ)
2740 RZ += MinRZ - (SizeInBytes % MinRZ);
2741 }
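  // Worked example with MinRZ == 32: SizeInBytes == 4 gives RZ == 28 (a single
  // 32-byte granule in total), while SizeInBytes == 1000 gives
  // clamp(7 * 32, 32, kMaxRZ) == 224 plus 24 bytes of rounding, i.e. RZ == 248
  // and a 1248-byte total, a multiple of MinRZ.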
2742
2743 assert((RZ + SizeInBytes) % MinRZ == 0);
2744
2745 return RZ;
2746}
2747
2748int ModuleAddressSanitizer::GetAsanVersion() const {
2749 int LongSize = M.getDataLayout().getPointerSizeInBits();
2750 bool isAndroid = M.getTargetTriple().isAndroid();
2751 int Version = 8;
2752 // 32-bit Android is one version ahead because of the switch to dynamic
2753 // shadow.
2754 Version += (LongSize == 32 && isAndroid);
2755 return Version;
2756}
2757
2758GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2759 if (!ModuleName) {
2760    // We shouldn't merge identical module names, as this string serves as a
2761    // unique module ID at runtime.
2762 ModuleName =
2763 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2764 /*AllowMerging*/ false, genName("module"));
2765 }
2766 return ModuleName;
2767}
2768
2769bool ModuleAddressSanitizer::instrumentModule() {
2770 initializeCallbacks();
2771
2772 for (Function &F : M)
2773 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);
2774
2775  // Create a module constructor. A destructor is created lazily because not
2776  // all platforms and not all modules need it.
2777 if (ConstructorKind == AsanCtorKind::Global) {
2778 if (CompileKernel) {
2779 // The kernel always builds with its own runtime, and therefore does not
2780 // need the init and version check calls.
2781 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2782 } else {
2783 std::string AsanVersion = std::to_string(GetAsanVersion());
2784 std::string VersionCheckName =
2785 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2786 std::tie(AsanCtorFunction, std::ignore) =
2787          createSanitizerCtorAndInitFunctions(
2788              M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
2789 /*InitArgs=*/{}, VersionCheckName);
2790 }
2791 }
2792
2793 bool CtorComdat = true;
2794 if (ClGlobals) {
2795 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2796 if (AsanCtorFunction) {
2797 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2798 instrumentGlobals(IRB, &CtorComdat);
2799 } else {
2800 IRBuilder<> IRB(*C);
2801 instrumentGlobals(IRB, &CtorComdat);
2802 }
2803 }
2804
2805 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2806
2807  // Put the constructor and destructor in comdat if both
2808  // (1) global instrumentation is not TU-specific, and
2809  // (2) the target is ELF.
2810 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2811 if (AsanCtorFunction) {
2812 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2813 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2814 }
2815 if (AsanDtorFunction) {
2816 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2817 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2818 }
2819 } else {
2820 if (AsanCtorFunction)
2821 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2822 if (AsanDtorFunction)
2823 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2824 }
2825
2826 return true;
2827}
2828
2829void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
2830 IRBuilder<> IRB(*C);
2831 // Create __asan_report* callbacks.
2832 // IsWrite, TypeSize and Exp are encoded in the function name.
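  // This produces names such as __asan_report_load4, __asan_report_exp_store8
  // and __asan_report_load_n for the error callbacks, and __asan_load4 or
  // __asan_storeN for the memory access callbacks, each with a "_noabort"
  // suffix when running in recover mode.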
2833 for (int Exp = 0; Exp < 2; Exp++) {
2834 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2835 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2836 const std::string ExpStr = Exp ? "exp_" : "";
2837 const std::string EndingStr = Recover ? "_noabort" : "";
2838
2839 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2840 SmallVector<Type *, 2> Args1{1, IntptrTy};
2841 AttributeList AL2;
2842 AttributeList AL1;
2843 if (Exp) {
2844 Type *ExpType = Type::getInt32Ty(*C);
2845 Args2.push_back(ExpType);
2846 Args1.push_back(ExpType);
2847 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2848 AL2 = AL2.addParamAttribute(*C, 2, AK);
2849 AL1 = AL1.addParamAttribute(*C, 1, AK);
2850 }
2851 }
2852 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2853 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2854 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2855
2856 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2857 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2858 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2859
2860 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2861 AccessSizeIndex++) {
2862 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2863 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2864 M.getOrInsertFunction(
2865 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2866 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2867
2868 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2869 M.getOrInsertFunction(
2870 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2871 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2872 }
2873 }
2874 }
2875
2876 const std::string MemIntrinCallbackPrefix =
2877 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2878 ? std::string("")
2879          : ClMemoryAccessCallbackPrefix;
2880  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2881 PtrTy, PtrTy, PtrTy, IntptrTy);
2882 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2883 PtrTy, PtrTy, IntptrTy);
2884 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2885 TLI->getAttrList(C, {1}, /*Signed=*/false),
2886 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2887
2888 AsanHandleNoReturnFunc =
2889 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2890
2891 AsanPtrCmpFunction =
2892 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2893 AsanPtrSubFunction =
2894 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2895 if (Mapping.InGlobal)
2896 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2897 ArrayType::get(IRB.getInt8Ty(), 0));
2898
2899 AMDGPUAddressShared =
2900 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2901 AMDGPUAddressPrivate =
2902 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2903}
2904
2905bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2906 // For each NSObject descendant having a +load method, this method is invoked
2907 // by the ObjC runtime before any of the static constructors is called.
2908 // Therefore we need to instrument such methods with a call to __asan_init
2909 // at the beginning in order to initialize our runtime before any access to
2910 // the shadow memory.
2911 // We cannot just ignore these methods, because they may call other
2912 // instrumented functions.
2913 if (F.getName().contains(" load]")) {
2914 FunctionCallee AsanInitFunction =
2915 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2916 IRBuilder<> IRB(&F.front(), F.front().begin());
2917 IRB.CreateCall(AsanInitFunction, {});
2918 return true;
2919 }
2920 return false;
2921}
2922
2923bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2924 // Generate code only when dynamic addressing is needed.
2925 if (Mapping.Offset != kDynamicShadowSentinel)
2926 return false;
2927
2928 IRBuilder<> IRB(&F.front().front());
2929 if (Mapping.InGlobal) {
2930    if (ClWithIfuncSuppressRemat) {
2931      // An empty inline asm with input reg == output reg.
2932 // An opaque pointer-to-int cast, basically.
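      // Presumably the register-constrained asm hides the shadow base from the
      // optimizer so it is not rematerialized in every function; this reading
      // is inferred from the flag name rather than documented behavior.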
2933      InlineAsm *Asm = InlineAsm::get(
2934          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2935 StringRef(""), StringRef("=r,0"),
2936 /*hasSideEffects=*/false);
2937 LocalDynamicShadow =
2938 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2939 } else {
2940 LocalDynamicShadow =
2941 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2942 }
2943 } else {
2944 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2945        kAsanShadowMemoryDynamicAddress, IntptrTy);
2946    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2947 }
2948 return true;
2949}
2950
2951void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2952 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2953 // to it as uninteresting. This assumes we haven't started processing allocas
2954 // yet. This check is done up front because iterating the use list in
2955 // isInterestingAlloca would be algorithmically slower.
2956 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2957
2958 // Try to get the declaration of llvm.localescape. If it's not in the module,
2959 // we can exit early.
2960 if (!F.getParent()->getFunction("llvm.localescape")) return;
2961
2962  // Look for a call to llvm.localescape in the entry block. It can't be in
2963  // any other block.
2964 for (Instruction &I : F.getEntryBlock()) {
2965 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2966 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2967 // We found a call. Mark all the allocas passed in as uninteresting.
2968 for (Value *Arg : II->args()) {
2969 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2970 assert(AI && AI->isStaticAlloca() &&
2971 "non-static alloca arg to localescape");
2972 ProcessedAllocas[AI] = false;
2973 }
2974 break;
2975 }
2976 }
2977}
2978
2979bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2980 bool ShouldInstrument =
2981 ClDebugMin < 0 || ClDebugMax < 0 ||
2982 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2983 Instrumented++;
2984 return !ShouldInstrument;
2985}
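// Example (sketch): with -asan-debug-min=2 and -asan-debug-max=3, only the
// third and fourth instrumentation sites in the function (indices 2 and 3) are
// instrumented and every other site is suppressed. Both flags default to -1,
// which disables the windowing and keeps every site.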
2986
2987bool AddressSanitizer::instrumentFunction(Function &F,
2988 const TargetLibraryInfo *TLI) {
2989 bool FunctionModified = false;
2990
2991 // Do not apply any instrumentation for naked functions.
2992 if (F.hasFnAttribute(Attribute::Naked))
2993 return FunctionModified;
2994
2995 // If needed, insert __asan_init before checking for SanitizeAddress attr.
2996 // This function needs to be called even if the function body is not
2997 // instrumented.
2998 if (maybeInsertAsanInitAtFunctionEntry(F))
2999 FunctionModified = true;
3000
3001 // Leave if the function doesn't need instrumentation.
3002 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
3003
3004 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3005 return FunctionModified;
3006
3007 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
3008
3009 initializeCallbacks(TLI);
3010
3011 FunctionStateRAII CleanupObj(this);
3012
3013 RuntimeCallInserter RTCI(F);
3014
3015 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
3016
3017 // We can't instrument allocas used with llvm.localescape. Only static allocas
3018 // can be passed to that intrinsic.
3019 markEscapedLocalAllocas(F);
3020
3021 // We want to instrument every address only once per basic block (unless there
3022 // are calls between uses).
3023 SmallPtrSet<Value *, 16> TempsToInstrument;
3024 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
3025 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
3026 SmallVector<Instruction *, 8> NoReturnCalls;
3027   SmallVector<BasicBlock *, 16> AllBlocks;
3028   SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
3029
3030 // Fill the set of memory operations to instrument.
3031 for (auto &BB : F) {
3032 AllBlocks.push_back(&BB);
3033 TempsToInstrument.clear();
3034 int NumInsnsPerBB = 0;
3035 for (auto &Inst : BB) {
3036 if (LooksLikeCodeInBug11395(&Inst)) return false;
3037       // Skip instructions inserted by other instrumentation passes.
3038 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3039 continue;
3040 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3041 getInterestingMemoryOperands(&Inst, InterestingOperands);
3042
3043 if (!InterestingOperands.empty()) {
3044 for (auto &Operand : InterestingOperands) {
3045 if (ClOpt && ClOptSameTemp) {
3046 Value *Ptr = Operand.getPtr();
3047 // If we have a mask, skip instrumentation if we've already
3048 // instrumented the full object. But don't add to TempsToInstrument
3049 // because we might get another load/store with a different mask.
3050 if (Operand.MaybeMask) {
3051 if (TempsToInstrument.count(Ptr))
3052 continue; // We've seen this (whole) temp in the current BB.
3053 } else {
3054 if (!TempsToInstrument.insert(Ptr).second)
3055 continue; // We've seen this temp in the current BB.
3056 }
3057 }
3058 OperandsToInstrument.push_back(Operand);
3059 NumInsnsPerBB++;
3060 }
3061       } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3062                   isInterestingPointerComparison(&Inst)) ||
3063                  ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3064                   isInterestingPointerSubtraction(&Inst))) {
3065         PointerComparisonsOrSubtracts.push_back(&Inst);
3066 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3067 // ok, take it.
3068 IntrinToInstrument.push_back(MI);
3069 NumInsnsPerBB++;
3070 } else {
3071 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3072 // A call inside BB.
3073 TempsToInstrument.clear();
3074 if (CB->doesNotReturn())
3075 NoReturnCalls.push_back(CB);
3076 }
3077         if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3078           maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3079       }
3080 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3081 }
3082 }
3083
3084 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3085 OperandsToInstrument.size() + IntrinToInstrument.size() >
3086 (unsigned)InstrumentationWithCallsThreshold);
3087 const DataLayout &DL = F.getDataLayout();
3088 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());
3089
3090 // Instrument.
3091 int NumInstrumented = 0;
3092 for (auto &Operand : OperandsToInstrument) {
3093 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3094 instrumentMop(ObjSizeVis, Operand, UseCalls,
3095 F.getDataLayout(), RTCI);
3096 FunctionModified = true;
3097 }
3098 for (auto *Inst : IntrinToInstrument) {
3099 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3100 instrumentMemIntrinsic(Inst, RTCI);
3101 FunctionModified = true;
3102 }
3103
3104 FunctionStackPoisoner FSP(F, *this, RTCI);
3105 bool ChangedStack = FSP.runOnFunction();
3106
3107 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3108 // See e.g. https://github.com/google/sanitizers/issues/37
3109 for (auto *CI : NoReturnCalls) {
3110 IRBuilder<> IRB(CI);
3111 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3112 }
3113
3114 for (auto *Inst : PointerComparisonsOrSubtracts) {
3115 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3116 FunctionModified = true;
3117 }
3118
3119 if (ChangedStack || !NoReturnCalls.empty())
3120 FunctionModified = true;
3121
3122 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3123 << F << "\n");
3124
3125 return FunctionModified;
3126}
3127
3128 // Workaround for bug 11395: we don't want to instrument the stack in functions
3129// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
3130// FIXME: remove once the bug 11395 is fixed.
3131bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3132 if (LongSize != 32) return false;
3133 CallInst *CI = dyn_cast<CallInst>(I);
3134 if (!CI || !CI->isInlineAsm()) return false;
3135 if (CI->arg_size() <= 5)
3136 return false;
3137 // We have inline assembly with quite a few arguments.
3138 return true;
3139}
3140
3141void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3142 IRBuilder<> IRB(*C);
3143 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3144 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3145 const char *MallocNameTemplate =
3146         ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3147             ? kAsanStackMallocAlwaysNameTemplate
3148             : kAsanStackMallocNameTemplate;
3149     for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3150 std::string Suffix = itostr(Index);
3151 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3152 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3153 AsanStackFreeFunc[Index] =
3154 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3155 IRB.getVoidTy(), IntptrTy, IntptrTy);
3156 }
3157 }
3158 if (ASan.UseAfterScope) {
3159 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3160 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3161 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3162 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3163 }
3164
3165 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3166 0xf3, 0xf5, 0xf8}) {
3167     std::ostringstream Name;
3168     Name << kAsanSetShadowPrefix;
3169     Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3170 AsanSetShadowFunc[Val] =
3171 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3172 }
3173
3174 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3175 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3176 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3177 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3178}
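// Sketch of the runtime names materialized above, assuming the usual string
// literals for the templates (e.g. kAsanSetShadowPrefix == "__asan_set_shadow_");
// exampleSetShadowName is a hypothetical helper shown only for the formatting:
static std::string exampleSetShadowName(size_t Val) {
  std::ostringstream Name;
  Name << "__asan_set_shadow_" << std::setw(2) << std::setfill('0') << std::hex
       << Val;
  return Name.str(); // exampleSetShadowName(0xf1) == "__asan_set_shadow_f1"
}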
3179
3180void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3181 ArrayRef<uint8_t> ShadowBytes,
3182 size_t Begin, size_t End,
3183 IRBuilder<> &IRB,
3184 Value *ShadowBase) {
3185 if (Begin >= End)
3186 return;
3187
3188 const size_t LargestStoreSizeInBytes =
3189 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3190
3191 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3192
3193   // Poison the given range in shadow using the largest store size, skipping
3194   // leading and trailing zeros in ShadowMask. Zeros never change, so they need
3195   // neither poisoning nor un-poisoning. Still, we don't mind if some of them
3196   // end up in the middle of a store.
3197 for (size_t i = Begin; i < End;) {
3198 if (!ShadowMask[i]) {
3199 assert(!ShadowBytes[i]);
3200 ++i;
3201 continue;
3202 }
3203
3204 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3205 // Fit store size into the range.
3206 while (StoreSizeInBytes > End - i)
3207 StoreSizeInBytes /= 2;
3208
3209 // Minimize store size by trimming trailing zeros.
3210 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3211 while (j <= StoreSizeInBytes / 2)
3212 StoreSizeInBytes /= 2;
3213 }
3214
3215 uint64_t Val = 0;
3216 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3217 if (IsLittleEndian)
3218 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3219 else
3220 Val = (Val << 8) | ShadowBytes[i + j];
3221 }
3222
3223 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3224     Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3225     IRB.CreateAlignedStore(
3226         Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
3227         Align(1));
3228
3229 i += StoreSizeInBytes;
3230 }
3231}
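// Worked example (sketch; examplePackShadow is a hypothetical helper mirroring
// the packing loop above): for ShadowBytes = {0xf1, 0xf1, 0x00, 0xf3} with
// ShadowMask = {1, 1, 0, 1}, a single 4-byte store suffices; on a
// little-endian target the packed value is 0xf300f1f1, and the unpoisoned zero
// byte in the middle simply rides along.
static uint64_t examplePackShadow(const uint8_t *Bytes, size_t N,
                                  bool LittleEndian) {
  uint64_t Val = 0;
  for (size_t j = 0; j < N; j++)
    Val = LittleEndian ? (Val | ((uint64_t)Bytes[j] << (8 * j)))
                       : ((Val << 8) | Bytes[j]);
  return Val; // e.g. {0xf1, 0xf1, 0x00, 0xf3} little-endian -> 0xf300f1f1
}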
3232
3233void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3234 ArrayRef<uint8_t> ShadowBytes,
3235 IRBuilder<> &IRB, Value *ShadowBase) {
3236 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3237}
3238
3239void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3240 ArrayRef<uint8_t> ShadowBytes,
3241 size_t Begin, size_t End,
3242 IRBuilder<> &IRB, Value *ShadowBase) {
3243 assert(ShadowMask.size() == ShadowBytes.size());
3244 size_t Done = Begin;
3245 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3246 if (!ShadowMask[i]) {
3247 assert(!ShadowBytes[i]);
3248 continue;
3249 }
3250 uint8_t Val = ShadowBytes[i];
3251 if (!AsanSetShadowFunc[Val])
3252 continue;
3253
3254 // Skip same values.
3255 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3256 }
3257
3258 if (j - i >= ASan.MaxInlinePoisoningSize) {
3259 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3260 RTCI.createRuntimeCall(
3261 IRB, AsanSetShadowFunc[Val],
3262 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3263 ConstantInt::get(IntptrTy, j - i)});
3264 Done = j;
3265 }
3266 }
3267
3268 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3269}
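// Example (sketch): with ASan.MaxInlinePoisoningSize == 64, a 256-byte run of
// the identical shadow value 0xf8 becomes a single runtime call, roughly
// __asan_set_shadow_f8(ShadowBase + i, 256), while shorter runs fall through
// to copyToShadowInline and are emitted as a few wide stores.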
3270
3271 // The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
3272 // each power of 2 from kMinStackMallocSize (64B) up to kMaxStackMallocSize (64K).
3273static int StackMallocSizeClass(uint64_t LocalStackSize) {
3274 assert(LocalStackSize <= kMaxStackMallocSize);
3275 uint64_t MaxSize = kMinStackMallocSize;
3276 for (int i = 0;; i++, MaxSize *= 2)
3277 if (LocalStackSize <= MaxSize) return i;
3278 llvm_unreachable("impossible LocalStackSize");
3279}
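// Worked examples (sketch), with kMinStackMallocSize == 64:
//   StackMallocSizeClass(64)    == 0
//   StackMallocSizeClass(96)    == 1   // rounds up to 128
//   StackMallocSizeClass(65536) == 10  // kMaxStackMallocSize, the last class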
3280
3281void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3282 Instruction *CopyInsertPoint = &F.front().front();
3283 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3284 // Insert after the dynamic shadow location is determined
3285 CopyInsertPoint = CopyInsertPoint->getNextNode();
3286 assert(CopyInsertPoint);
3287 }
3288 IRBuilder<> IRB(CopyInsertPoint);
3289 const DataLayout &DL = F.getDataLayout();
3290 for (Argument &Arg : F.args()) {
3291 if (Arg.hasByValAttr()) {
3292 Type *Ty = Arg.getParamByValType();
3293 const Align Alignment =
3294 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3295
3296 AllocaInst *AI = IRB.CreateAlloca(
3297 Ty, nullptr,
3298 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3299 ".byval");
3300 AI->setAlignment(Alignment);
3301 Arg.replaceAllUsesWith(AI);
3302
3303 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3304 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3305 }
3306 }
3307}
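// Example (sketch): a parameter lowered as `ptr byval(%struct.S) %s` gets a
// fresh alloca named "s.byval", every use of %s is redirected to it, and a
// memcpy of the type's alloc size seeds the copy, so the argument's home can
// be given redzones like any other local.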
3308
3309PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3310 Value *ValueIfTrue,
3311 Instruction *ThenTerm,
3312 Value *ValueIfFalse) {
3313 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3314 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3315 PHI->addIncoming(ValueIfFalse, CondBlock);
3316 BasicBlock *ThenBlock = ThenTerm->getParent();
3317 PHI->addIncoming(ValueIfTrue, ThenBlock);
3318 return PHI;
3319}
3320
3321Value *FunctionStackPoisoner::createAllocaForLayout(
3322 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3323 AllocaInst *Alloca;
3324 if (Dynamic) {
3325 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3326 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3327 "MyAlloca");
3328 } else {
3329 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3330 nullptr, "MyAlloca");
3331 assert(Alloca->isStaticAlloca());
3332 }
3333 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3334 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3335 Alloca->setAlignment(Align(FrameAlignment));
3336 return IRB.CreatePointerCast(Alloca, IntptrTy);
3337}
3338
3339void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3340 BasicBlock &FirstBB = *F.begin();
3341 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3342 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3343 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3344 DynamicAllocaLayout->setAlignment(Align(32));
3345}
3346
3347void FunctionStackPoisoner::processDynamicAllocas() {
3348 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3349 assert(DynamicAllocaPoisonCallVec.empty());
3350 return;
3351 }
3352
3353 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3354 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3355 assert(APC.InsBefore);
3356 assert(APC.AI);
3357 assert(ASan.isInterestingAlloca(*APC.AI));
3358 assert(!APC.AI->isStaticAlloca());
3359
3360 IRBuilder<> IRB(APC.InsBefore);
3361 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3362 // Dynamic allocas will be unpoisoned unconditionally below in
3363 // unpoisonDynamicAllocas.
3364     // Flag that we need to unpoison static allocas.
3365 }
3366
3367 // Handle dynamic allocas.
3368 createDynamicAllocasInitStorage();
3369 for (auto &AI : DynamicAllocaVec)
3370 handleDynamicAllocaCall(AI);
3371 unpoisonDynamicAllocas();
3372}
3373
3374/// Collect instructions in the entry block after \p InsBefore which initialize
3375/// permanent storage for a function argument. These instructions must remain in
3376/// the entry block so that uninitialized values do not appear in backtraces. An
3377/// added benefit is that this conserves spill slots. This does not move stores
3378/// before instrumented / "interesting" allocas.
3379 static void findStoresToUninstrumentedArgAllocas(
3380     AddressSanitizer &ASan, Instruction &InsBefore,
3381 SmallVectorImpl<Instruction *> &InitInsts) {
3382 Instruction *Start = InsBefore.getNextNode();
3383 for (Instruction *It = Start; It; It = It->getNextNode()) {
3384 // Argument initialization looks like:
3385 // 1) store <Argument>, <Alloca> OR
3386 // 2) <CastArgument> = cast <Argument> to ...
3387 // store <CastArgument> to <Alloca>
3388 // Do not consider any other kind of instruction.
3389 //
3390 // Note: This covers all known cases, but may not be exhaustive. An
3391 // alternative to pattern-matching stores is to DFS over all Argument uses:
3392 // this might be more general, but is probably much more complicated.
3393 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3394 continue;
3395 if (auto *Store = dyn_cast<StoreInst>(It)) {
3396 // The store destination must be an alloca that isn't interesting for
3397 // ASan to instrument. These are moved up before InsBefore, and they're
3398 // not interesting because allocas for arguments can be mem2reg'd.
3399 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3400 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3401 continue;
3402
3403 Value *Val = Store->getValueOperand();
3404 bool IsDirectArgInit = isa<Argument>(Val);
3405 bool IsArgInitViaCast =
3406 isa<CastInst>(Val) &&
3407 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3408 // Check that the cast appears directly before the store. Otherwise
3409 // moving the cast before InsBefore may break the IR.
3410 Val == It->getPrevNode();
3411 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3412 if (!IsArgInit)
3413 continue;
3414
3415 if (IsArgInitViaCast)
3416 InitInsts.push_back(cast<Instruction>(Val));
3417 InitInsts.push_back(Store);
3418 continue;
3419 }
3420
3421 // Do not reorder past unknown instructions: argument initialization should
3422 // only involve casts and stores.
3423 return;
3424 }
3425}
3426
3427 static StringRef getAllocaName(AllocaInst *AI) {
3428   // Alloca could have been renamed for uniqueness. Its true name will have been
3429 // recorded as an annotation.
3430 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3431 MDTuple *AllocaAnnotations =
3432 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3433 for (auto &Annotation : AllocaAnnotations->operands()) {
3434 if (!isa<MDTuple>(Annotation))
3435 continue;
3436 auto AnnotationTuple = cast<MDTuple>(Annotation);
3437 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3438 Index++) {
3439 // All annotations are strings
3440 auto MetadataString =
3441 cast<MDString>(AnnotationTuple->getOperand(Index));
3442 if (MetadataString->getString() == "alloca_name_altered")
3443 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3444 ->getString();
3445 }
3446 }
3447 }
3448 return AI->getName();
3449}
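// Example (sketch): an alloca renamed for uniqueness carries metadata along
// the lines of
//   %x1 = alloca i32, !annotation !{!{!"alloca_name_altered", !"x"}}
// in which case the helper above returns "x" rather than "x1".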
3450
3451void FunctionStackPoisoner::processStaticAllocas() {
3452 if (AllocaVec.empty()) {
3453 assert(StaticAllocaPoisonCallVec.empty());
3454 return;
3455 }
3456
3457 int StackMallocIdx = -1;
3458 DebugLoc EntryDebugLocation;
3459 if (auto SP = F.getSubprogram())
3460 EntryDebugLocation =
3461 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3462
3463 Instruction *InsBefore = AllocaVec[0];
3464 IRBuilder<> IRB(InsBefore);
3465
3466 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3467 // debug info is broken, because only entry-block allocas are treated as
3468 // regular stack slots.
3469 auto InsBeforeB = InsBefore->getParent();
3470 assert(InsBeforeB == &F.getEntryBlock());
3471 for (auto *AI : StaticAllocasToMoveUp)
3472 if (AI->getParent() == InsBeforeB)
3473 AI->moveBefore(InsBefore->getIterator());
3474
3475 // Move stores of arguments into entry-block allocas as well. This prevents
3476 // extra stack slots from being generated (to house the argument values until
3477 // they can be stored into the allocas). This also prevents uninitialized
3478 // values from being shown in backtraces.
3479 SmallVector<Instruction *, 8> ArgInitInsts;
3480 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3481 for (Instruction *ArgInitInst : ArgInitInsts)
3482 ArgInitInst->moveBefore(InsBefore->getIterator());
3483
3484 // If we have a call to llvm.localescape, keep it in the entry block.
3485 if (LocalEscapeCall)
3486 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3487
3488   SmallVector<ASanStackVariableDescription, 16> SVD;
3489   SVD.reserve(AllocaVec.size());
3490   for (AllocaInst *AI : AllocaVec) {
3491     StringRef Name = getAllocaName(AI);
3492     ASanStackVariableDescription D = {Name.data(),
3493                                       ASan.getAllocaSizeInBytes(*AI),
3494 0,
3495 AI->getAlign().value(),
3496 AI,
3497 0,
3498 0};
3499 SVD.push_back(D);
3500 }
3501
3502 // Minimal header size (left redzone) is 4 pointers,
3503   // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3504 uint64_t Granularity = 1ULL << Mapping.Scale;
3505 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3506 const ASanStackFrameLayout &L =
3507 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3508
3509 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3510   DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3511   for (auto &Desc : SVD)
3512 AllocaToSVDMap[Desc.AI] = &Desc;
3513
3514 // Update SVD with information from lifetime intrinsics.
3515 for (const auto &APC : StaticAllocaPoisonCallVec) {
3516 assert(APC.InsBefore);
3517 assert(APC.AI);
3518 assert(ASan.isInterestingAlloca(*APC.AI));
3519 assert(APC.AI->isStaticAlloca());
3520
3521 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3522 Desc.LifetimeSize = Desc.Size;
3523 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3524 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3525 if (LifetimeLoc->getFile() == FnLoc->getFile())
3526 if (unsigned Line = LifetimeLoc->getLine())
3527 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3528 }
3529 }
3530 }
3531
3532 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3533 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3534 uint64_t LocalStackSize = L.FrameSize;
3535 bool DoStackMalloc =
3536 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3537 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3538 bool DoDynamicAlloca = ClDynamicAllocaStack;
3539 // Don't do dynamic alloca or stack malloc if:
3540 // 1) There is inline asm: too often it makes assumptions on which registers
3541 // are available.
3542 // 2) There is a returns_twice call (typically setjmp), which is
3543 // optimization-hostile, and doesn't play well with introduced indirect
3544 // register-relative calculation of local variable addresses.
3545 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3546 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3547
3548 Value *StaticAlloca =
3549 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3550
3551 Value *FakeStack;
3552 Value *LocalStackBase;
3553 Value *LocalStackBaseAlloca;
3554 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3555
3556 if (DoStackMalloc) {
3557 LocalStackBaseAlloca =
3558 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3559 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3560 // void *FakeStack = __asan_option_detect_stack_use_after_return
3561 // ? __asan_stack_malloc_N(LocalStackSize)
3562 // : nullptr;
3563 // void *LocalStackBase = (FakeStack) ? FakeStack :
3564 // alloca(LocalStackSize);
3565       Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3566           kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3567       Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3568           IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3569           Constant::getNullValue(IRB.getInt32Ty()));
3570       Instruction *Term =
3571 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3572 IRBuilder<> IRBIf(Term);
3573 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3574 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3575 Value *FakeStackValue =
3576 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3577 ConstantInt::get(IntptrTy, LocalStackSize));
3578 IRB.SetInsertPoint(InsBefore);
3579 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3580 ConstantInt::get(IntptrTy, 0));
3581 } else {
3582       // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3583 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3584 // void *LocalStackBase = (FakeStack) ? FakeStack :
3585 // alloca(LocalStackSize);
3586 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3587 FakeStack =
3588 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3589 ConstantInt::get(IntptrTy, LocalStackSize));
3590 }
3591 Value *NoFakeStack =
3592 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3593 Instruction *Term =
3594 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3595 IRBuilder<> IRBIf(Term);
3596 Value *AllocaValue =
3597 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3598
3599 IRB.SetInsertPoint(InsBefore);
3600 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3601 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3602 DIExprFlags |= DIExpression::DerefBefore;
3603 } else {
3604 // void *FakeStack = nullptr;
3605 // void *LocalStackBase = alloca(LocalStackSize);
3606 FakeStack = ConstantInt::get(IntptrTy, 0);
3607 LocalStackBase =
3608 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3609 LocalStackBaseAlloca = LocalStackBase;
3610 }
3611
3612 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3613   // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3614 // later passes and can result in dropped variable coverage in debug info.
3615 Value *LocalStackBaseAllocaPtr =
3616 isa<PtrToIntInst>(LocalStackBaseAlloca)
3617 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3618 : LocalStackBaseAlloca;
3619 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3620 "Variable descriptions relative to ASan stack base will be dropped");
3621
3622 // Replace Alloca instructions with base+offset.
3623 SmallVector<Value *> NewAllocaPtrs;
3624 for (const auto &Desc : SVD) {
3625 AllocaInst *AI = Desc.AI;
3626 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3627 Desc.Offset);
3628 Value *NewAllocaPtr = IRB.CreateIntToPtr(
3629 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3630 AI->getType());
3631 AI->replaceAllUsesWith(NewAllocaPtr);
3632 NewAllocaPtrs.push_back(NewAllocaPtr);
3633 }
3634
3635 // The left-most redzone has enough space for at least 4 pointers.
3636 // Write the Magic value to redzone[0].
3637 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3638 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3639 BasePlus0);
3640 // Write the frame description constant to redzone[1].
3641 Value *BasePlus1 = IRB.CreateIntToPtr(
3642 IRB.CreateAdd(LocalStackBase,
3643 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3644 IntptrPtrTy);
3645 GlobalVariable *StackDescriptionGlobal =
3646 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3647 /*AllowMerging*/ true, genName("stack"));
3648 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3649 IRB.CreateStore(Description, BasePlus1);
3650 // Write the PC to redzone[2].
3651 Value *BasePlus2 = IRB.CreateIntToPtr(
3652 IRB.CreateAdd(LocalStackBase,
3653 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3654 IntptrPtrTy);
3655 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3656
3657 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3658
3659 // Poison the stack red zones at the entry.
3660 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3661   // As the mask we must use the most poisoned case: redzones and after-scope.
3662   // As the bytes we can use either the same, or just the redzones.
3663 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3664
3665 if (!StaticAllocaPoisonCallVec.empty()) {
3666 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3667
3668 // Poison static allocas near lifetime intrinsics.
3669 for (const auto &APC : StaticAllocaPoisonCallVec) {
3670 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3671 assert(Desc.Offset % L.Granularity == 0);
3672 size_t Begin = Desc.Offset / L.Granularity;
3673 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3674
3675 IRBuilder<> IRB(APC.InsBefore);
3676 copyToShadow(ShadowAfterScope,
3677 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3678 IRB, ShadowBase);
3679 }
3680 }
3681
3682 // Remove lifetime markers now that these are no longer allocas.
3683 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3684 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3685 auto *I = cast<Instruction>(U);
3686 if (I->isLifetimeStartOrEnd())
3687 I->eraseFromParent();
3688 }
3689 }
3690
3691 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3692 SmallVector<uint8_t, 64> ShadowAfterReturn;
3693
3694 // (Un)poison the stack before all ret instructions.
3695 for (Instruction *Ret : RetVec) {
3696 IRBuilder<> IRBRet(Ret);
3697 // Mark the current frame as retired.
3698 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3699 BasePlus0);
3700 if (DoStackMalloc) {
3701 assert(StackMallocIdx >= 0);
3702 // if FakeStack != 0 // LocalStackBase == FakeStack
3703 // // In use-after-return mode, poison the whole stack frame.
3704 // if StackMallocIdx <= 4
3705 // // For small sizes inline the whole thing:
3706 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3707 // **SavedFlagPtr(FakeStack) = 0
3708 // else
3709 // __asan_stack_free_N(FakeStack, LocalStackSize)
3710 // else
3711 // <This is not a fake stack; unpoison the redzones>
3712 Value *Cmp =
3713 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3714 Instruction *ThenTerm, *ElseTerm;
3715 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3716
3717 IRBuilder<> IRBPoison(ThenTerm);
3718 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3719 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3720         ShadowAfterReturn.resize(ClassSize / L.Granularity,
3721                                  kAsanStackUseAfterReturnMagic);
3722         copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3723 ShadowBase);
3724 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3725 FakeStack,
3726 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3727 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3728 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3729 IRBPoison.CreateStore(
3730 Constant::getNullValue(IRBPoison.getInt8Ty()),
3731 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3732 } else {
3733 // For larger frames call __asan_stack_free_*.
3734 RTCI.createRuntimeCall(
3735 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3736 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3737 }
3738
3739 IRBuilder<> IRBElse(ElseTerm);
3740 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3741 } else {
3742 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3743 }
3744 }
3745
3746 // We are done. Remove the old unused alloca instructions.
3747 for (auto *AI : AllocaVec)
3748 AI->eraseFromParent();
3749}
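// Frame header sketch (assuming a 64-bit target, so ASan.LongSize / 8 == 8):
//   [LocalStackBase + 0]  kCurrentStackFrameMagic, overwritten with
//                         kRetiredStackFrameMagic on return
//   [LocalStackBase + 8]  pointer to the frame description string
//   [LocalStackBase + 16] PC of the function, for frame symbolization
// The fourth header pointer of the left redzone is left unused here.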
3750
3751void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3752 IRBuilder<> &IRB, bool DoPoison) {
3753   // For now, just insert a call into the ASan runtime.
3754 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3755 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3756 RTCI.createRuntimeCall(
3757 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3758 {AddrArg, SizeArg});
3759}
3760
3761// Handling llvm.lifetime intrinsics for a given %alloca:
3762// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3763// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3764// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3765// could be poisoned by previous llvm.lifetime.end instruction, as the
3766// variable may go in and out of scope several times, e.g. in loops).
3767// (3) if we poisoned at least one %alloca in a function,
3768// unpoison the whole stack frame at function exit.
3769void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3770 IRBuilder<> IRB(AI);
3771
3772 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3773 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3774
3775 Value *Zero = Constant::getNullValue(IntptrTy);
3776 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3777 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3778
3779   // We need to extend the alloca with additional memory to hold the
3780   // redzones. OldSize is the number of allocated elements, each of
3781   // ElementSize bytes, so the allocated memory size in bytes is
3782   // OldSize * ElementSize.
3783 const unsigned ElementSize =
3784 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3785 Value *OldSize =
3786 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3787 ConstantInt::get(IntptrTy, ElementSize));
3788
3789 // PartialSize = OldSize % 32
3790 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3791
3792 // Misalign = kAllocaRzSize - PartialSize;
3793 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3794
3795 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3796 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3797 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3798
3799 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3800 // Alignment is added to locate left redzone, PartialPadding for possible
3801 // partial redzone and kAllocaRzSize for right redzone respectively.
3802 Value *AdditionalChunkSize = IRB.CreateAdd(
3803 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3804 PartialPadding);
3805
3806 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3807
3808   // Insert a new alloca with the computed NewSize and Alignment.
3809 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3810 NewAlloca->setAlignment(Alignment);
3811
3812 // NewAddress = Address + Alignment
3813 Value *NewAddress =
3814 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3815 ConstantInt::get(IntptrTy, Alignment.value()));
3816
3817   // Insert an __asan_alloca_poison call for the newly created alloca.
3818 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3819
3820   // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3821   // later to unpoison the dynamic allocas at function exit.
3822 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3823
3824 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3825
3826 // Remove lifetime markers now that this is no longer an alloca.
3827 for (User *U : make_early_inc_range(AI->users())) {
3828 auto *I = cast<Instruction>(U);
3829 if (I->isLifetimeStartOrEnd())
3830 I->eraseFromParent();
3831 }
3832
3833   // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3834 AI->replaceAllUsesWith(NewAddressPtr);
3835
3836 // We are done. Erase old alloca from parent.
3837 AI->eraseFromParent();
3838}
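// Worked example (sketch): assume kAllocaRzSize == 32, Alignment == 32, and a
// dynamic alloca of OldSize == 40 bytes. Then
//   PartialSize    = 40 & 31             = 8
//   Misalign       = 32 - 8              = 24
//   PartialPadding = (24 != 32) ? 24 : 0 = 24
//   NewSize        = 40 + (32 + 32 + 24) = 128
// i.e. a 32-byte left redzone, the 40-byte payload padded out to 64, and a
// 32-byte right redzone.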
3839
3840// isSafeAccess returns true if Addr is always inbounds with respect to its
3841// base object. For example, it is a field access or an array access with
3842// constant inbounds index.
3843bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3844 Value *Addr, TypeSize TypeStoreSize) const {
3845 if (TypeStoreSize.isScalable())
3846 // TODO: We can use vscale_range to convert a scalable value to an
3847 // upper bound on the access size.
3848 return false;
3849
3850 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3851 if (!SizeOffset.bothKnown())
3852 return false;
3853
3854 uint64_t Size = SizeOffset.Size.getZExtValue();
3855 int64_t Offset = SizeOffset.Offset.getSExtValue();
3856
3857 // Three checks are required to ensure safety:
3858 // . Offset >= 0 (since the offset is given from the base ptr)
3859 // . Size >= Offset (unsigned)
3860 // . Size - Offset >= NeededSize (unsigned)
3861 return Offset >= 0 && Size >= uint64_t(Offset) &&
3862 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3863}
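// Worked example (sketch): for a 16-byte local object and an 8-byte access at
// byte offset 8, ObjSizeVis yields Size == 16 and Offset == 8, and
// TypeStoreSize / 8 == 8, so all three checks pass and no runtime check is
// needed. The same access at offset 12 would fail the last check (16 - 12 < 8).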
@ Poison
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:687
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
raw_pwrite_stream & OS
#define OP(OPC)
Definition: Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1540
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1562
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
Definition: Instructions.h:64
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:153
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:128
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:101
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:121
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:143
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
Definition: Instructions.h:132
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:506
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
Definition: Attributes.h:644
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:393
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:248
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1415
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
Definition: InstrTypes.h:1948
unsigned arg_size() const
Definition: InstrTypes.h:1290
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition: Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition: Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition: Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition: Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition: Comdat.h:38
ConstantArray - Constant Array Declarations.
Definition: Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1314
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2314
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2246
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1274
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
Definition: Constants.cpp:1602
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1833
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1380
This is an important base class in LLVM.
Definition: Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:435
Debug location.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:124
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:50
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition: Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition: Function.cpp:380
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:903
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1036
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:359
const Constant * getAliasee() const
Definition: GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified module's alias list.
Definition: Globals.cpp:585
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
Definition: Metadata.cpp:1840
LLVM_ABI void setComdat(Comdat *C)
Definition: Globals.cpp:214
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition: Globals.cpp:275
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:250
void setUnnamedAddr(UnnamedAddr Val)
Definition: GlobalValue.h:233
bool hasLocalLinkage() const
Definition: GlobalValue.h:530
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
Definition: GlobalValue.h:569
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:273
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:69
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:256
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition: GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition: GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition: GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
Definition: GlobalValue.h:277
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition: Globals.cpp:540
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1830
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:547
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1864
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition: IRBuilder.h:687
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2251
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2357
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1005
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2199
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1513
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2036
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:567
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2333
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1923
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:834
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:522
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2494
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1805
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2329
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1420
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition: IRBuilder.h:533
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition: IRBuilder.h:1847
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1551
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1860
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1403
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2194
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2651
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2508
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:130
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2277
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:600
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1883
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition: IRBuilder.h:1573
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:552
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2209
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2780
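Taken together, these builder helpers are enough to express the classic shadow-address computation Shadow = (Addr >> Scale) + Offset. A minimal sketch, assuming the pass supplies IntptrTy, Scale, and Offset from its mapping (the function name is illustrative):
static Value *emitShadowAddress(IRBuilder<> &IRB, Value *Addr, Type *IntptrTy,
                                uint64_t Scale, uint64_t Offset) {
  // Pointer -> integer, shift right by the mapping scale, add the offset.
  Value *AddrInt = IRB.CreatePtrToInt(Addr, IntptrTy);
  Value *Shifted = IRB.CreateLShr(AddrInt, ConstantInt::get(IntptrTy, Scale));
  Value *ShadowInt =
      IRB.CreateAdd(Shifted, ConstantInt::get(IntptrTy, Offset));
  // Integer -> pointer into the shadow region.
  return IRB.CreateIntToPtr(ShadowInt,
                            PointerType::getUnqual(IRB.getInt8Ty()));
}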
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR unit.
Definition: PassManager.h:585
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:262
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
Definition: InstVisitor.h:239
RetTy visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:214
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitReturnInst(ReturnInst &I)
Definition: InstVisitor.h:221
RetTy visitAllocaInst(AllocaInst &I)
Definition: InstVisitor.h:168
RetTy visitResumeInst(ResumeInst &I)
Definition: InstVisitor.h:233
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:406
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:428
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:510
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:86
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
An instruction for reading from memory.
Definition: Instructions.h:180
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:48
Metadata node.
Definition: Metadata.h:1077
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1443
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1565
Tuple of metadata.
Definition: Metadata.h:1493
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition: Metadata.h:63
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1069
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
Definition: DerivedTypes.h:720
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:171
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
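A minimal sketch of the visit-once idiom these two members support (Worklist is illustrative):
SmallPtrSet<const Value *, 8> Visited;
for (const Value *V : Worklist)
  if (Visited.insert(V).second) {
    // First time V is seen; process it. count(V) would now return 1.
  }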
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the 'N' template parameter.
Definition: SmallVector.h:574
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void reserve(size_type N)
Definition: SmallVector.h:664
void resize(size_type N)
Definition: SmallVector.h:639
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
Definition: Instructions.h:296
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:269
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:151
Class to represent struct types.
Definition: DerivedTypes.h:218
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:414
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vector, but is general enough to go beyond that when required.
Definition: TinyPtrVector.h:29
EltTy front() const
bool empty() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:818
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition: Triple.h:906
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition: Triple.h:597
bool isOSNetBSD() const
Definition: Triple.h:627
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:816
bool isABIN32() const
Definition: Triple.h:1125
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition: Triple.h:1027
@ aarch64_be
Definition: Triple.h:55
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:408
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition: Triple.h:1016
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition: Triple.h:1022
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:676
@ DXContainer
Definition: Triple.h:318
@ UnknownObjectFormat
Definition: Triple.h:315
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition: Triple.h:911
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:725
bool isAMDGPU() const
Definition: Triple.h:903
bool isMacOSX() const
Is this a Mac OS X triple.
Definition: Triple.h:563
bool isOSFreeBSD() const
Definition: Triple.h:635
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:745
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition: Triple.h:582
bool isiOS() const
Is this an iOS triple.
Definition: Triple.h:572
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition: Triple.h:813
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition: Triple.h:1109
bool isOSFuchsia() const
Definition: Triple.h:639
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition: Triple.h:666
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:311
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
op_range operands()
Definition: User.h:292
Value * getOperand(unsigned i) const
Definition: User.h:232
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:502
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
iterator_range< user_iterator > users()
Definition: Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:396
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
self_iterator getIterator()
Definition: ilist_node.h:134
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands of the instruction that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given the SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
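A hedged sketch of how getInterestingMemoryOperands and instrumentAddress compose: collect the operands first, then instrument, since instrumentation splits blocks and would invalidate a plain instruction walk. The InterestingMemoryOperand accessors and the scale/offset constants below are assumptions for illustration, not values any particular target uses:
void instrumentFunction(Module &M, Function &F) {
  SmallVector<InterestingMemoryOperand, 16> ToInstrument;
  for (Instruction &I : instructions(F)) // from llvm/IR/InstIterator.h
    getInterestingMemoryOperands(M, &I, ToInstrument);
  for (InterestingMemoryOperand &Op : ToInstrument) {
    IRBuilder<> IRB(Op.getInsn());
    instrumentAddress(M, IRB, Op.getInsn(), Op.getInsn(), Op.getPtr(),
                      Op.Alignment.valueOrOne(), Op.TypeStoreSize, Op.IsWrite,
                      /*SizeArgument=*/nullptr, /*UseCalls=*/false,
                      /*Recover=*/false, /*AsanScale=*/3, /*AsanOffset=*/0);
  }
}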
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition: MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
Definition: X86BaseInfo.h:732
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
Definition: CommandLine.h:712
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition: DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
@ Done
Definition: Threading.h:60
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan's stack use-after-return detection.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
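A small sketch of how a pass might act on the three modes (the variable and the chosen mode are illustrative):
AsanDetectStackUseAfterReturnMode UAR =
    AsanDetectStackUseAfterReturnMode::Runtime; // illustrative choice
bool EmitChecks = UAR != AsanDetectStackUseAfterReturnMode::Never;
// With Runtime, checks are emitted but guarded by a runtime flag;
// with Always, they are emitted unconditionally.
bool GuardAtRuntime = UAR == AsanDetectStackUseAfterReturnMode::Runtime;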
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:663
Op::Description Desc
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition: bit.h:157
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:288
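countr_zero and isPowerOf2_32 (listed above and here) give the usual way to turn a power-of-two shadow granularity into a mapping scale; a minimal sketch:
// Granularity 8 means one shadow byte covers 8 application bytes.
uint32_t Granularity = 8;
assert(isPowerOf2_32(Granularity) && "granularity must be a power of two");
int Scale = countr_zero(Granularity); // 8 == 1 << 3, so Scale == 3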
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
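The typical use, sketched with placeholder names ("mysan.module_ctor" and "__mysan_init" are illustrative), pairs this with appendToGlobalCtors (listed below):
auto [Ctor, InitFn] = createSanitizerCtorAndInitFunctions(
    M, /*CtorName=*/"mysan.module_ctor", /*InitName=*/"__mysan_init",
    /*InitArgTypes=*/{}, /*InitArgs=*/{});
appendToGlobalCtors(M, Ctor, /*Priority=*/1); // run early at program start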
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
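A minimal sketch of computing a frame layout for two 8-byte variables and deriving its shadow bytes; granularity 8 and minimum header size 32 match the usual 64-bit defaults, and the field values are illustrative:
SmallVector<ASanStackVariableDescription, 2> Vars = {
    {"a", /*Size=*/8, /*LifetimeSize=*/8, /*Alignment=*/8, /*AI=*/nullptr,
     /*Offset=*/0, /*Line=*/0},
    {"b", 8, 8, 8, nullptr, 0, 0}};
ASanStackFrameLayout Layout =
    ComputeASanStackFrameLayout(Vars, /*Granularity=*/8, /*MinHeaderSize=*/32);
SmallVector<uint8_t, 64> Shadow = GetShadowBytes(Vars, Layout);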
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:769
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and HWAddressSanitizer.
@ Dynamic
Denotes mode unknown at compile time.
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
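These utilities enable the familiar check-and-trap shape; a sketch, assuming ShadowByte is an already-loaded shadow value and eliding the reporting call:
static void emitShadowCheck(IRBuilder<> &IRB, Value *ShadowByte) {
  // Branch to a rarely-taken block when the shadow byte is nonzero.
  Value *Poisoned = IRB.CreateICmpNE(
      ShadowByte, Constant::getNullValue(ShadowByte->getType()));
  Instruction *CrashTerm = SplitBlockAndInsertIfThen(
      Poisoned, IRB.GetInsertPoint(), /*Unreachable=*/true,
      MDBuilder(IRB.getContext()).createUnlikelyBranchWeights());
  IRB.SetInsertPoint(CrashTerm);
  // ... emit the call to the reporting runtime function here ...
}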
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition: Local.cpp:3829
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
Definition: ModuleUtils.cpp:78
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module already has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
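A sketch of querying the mapping for a concrete target; for the default x86-64 Linux userspace configuration this reports the familiar offset 0x7fff8000 with scale 3:
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(Triple("x86_64-unknown-linux-gnu"), /*LongSize=*/64,
                          /*IsKasan=*/false, &ShadowBase, &MappingScale,
                          &OrShadowOffset);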
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition: Demangle.cpp:20
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
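A sketch of the per-lane shape such a loop takes (cf. the scalable-vector FIXME at the top of the file); VecOfPtrs, an assumed <4 x ptr> value, and the surrounding builder are illustrative:
SplitBlockAndInsertForEachLane(
    ElementCount::getFixed(4), IRB.getInt64Ty(), IRB.GetInsertPoint(),
    [&](IRBuilderBase &B, Value *Index) {
      // Check one lane's pointer; the actual check is elided here.
      Value *LanePtr = B.CreateExtractElement(VecOfPtrs, Index);
      (void)LanePtr;
    });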
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition: Local.cpp:1942
#define N
LLVM_ABI ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
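A tiny sketch of how these two alignment types relate:
MaybeAlign MA;                      // unset: alignment unknown (encoded as 0)
Align A = MA.valueOrOne();          // conservatively Align(1)
uint64_t Bytes = Align(16).value(); // 16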
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:70
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.
bool bothKnown() const