//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development, for the details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//      - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
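//
// For illustration only (simplified; the real callback is chosen by access
// size, alignment, and volatility), a plain 4-byte load such as
//
//   %v = load i32, ptr %p
//
// is instrumented by inserting a runtime call right before it:
//
//   call void @__tsan_read4(ptr %p)
//   %v = load i32, ptr %p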
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include "llvm/Transforms/Utils/Instrumentation.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

54 "tsan-instrument-memory-accesses", cl::init(true),
55 cl::desc("Instrument memory accesses"), cl::Hidden);
56static cl::opt<bool>
57 ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
58 cl::desc("Instrument function entry and exit"),
61 "tsan-handle-cxx-exceptions", cl::init(true),
62 cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
64static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
65 cl::init(true),
66 cl::desc("Instrument atomics"),
69 "tsan-instrument-memintrinsics", cl::init(true),
70 cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
72 "tsan-distinguish-volatile", cl::init(false),
73 cl::desc("Emit special instrumentation for accesses to volatiles"),
76 "tsan-instrument-read-before-write", cl::init(false),
77 cl::desc("Do not eliminate read instrumentation for read-before-writes"),
80 "tsan-compound-read-before-write", cl::init(false),
81 cl::desc("Emit special compound instrumentation for reads-before-writes"),
83static cl::opt<bool>
84 ClOmitNonCaptured("tsan-omit-by-pointer-capturing", cl::init(true),
85 cl::desc("Omit accesses due to pointer capturing"),
87
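// The flags above are cl::opt options, so they can be flipped from the clang
// driver through -mllvm (illustrative invocation; any source file works):
//
//   clang -fsanitize=thread -mllvm -tsan-compound-read-before-write=1 a.c
//
// or passed directly to opt when running the passes standalone, e.g. with
// -passes='tsan-module,function(tsan)'.
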
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

const char kTsanModuleCtorName[] = "tsan.module_ctor";
const char kTsanInitName[] = "__tsan_init";

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
///
/// Instantiating ThreadSanitizer inserts the tsan runtime library API function
/// declarations into the module if they don't exist already. Instantiating
/// ensures the __tsan_init function is in the list of global constructors for
/// the module.
struct ThreadSanitizer {
  ThreadSanitizer() {
    // Check options and warn user.
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
    }
  }

  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

private:
  // Internal Instruction wrapper that contains more information about the
  // Instruction from prior analysis.
  struct InstructionInfo {
    // Instrumentation emitted for this instruction is for a compounded set of
    // read and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };

  void initialize(Module &M, const TargetLibraryInfo &TLI);
  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<InstructionInfo> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  FunctionCallee TsanFuncEntry;
  FunctionCallee TsanFuncExit;
  FunctionCallee TsanIgnoreBegin;
  FunctionCallee TsanIgnoreEnd;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
                              [kNumberOfAccessSizes];
  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicThreadFence;
  FunctionCallee TsanAtomicSignalFence;
  FunctionCallee TsanVptrUpdate;
  FunctionCallee TsanVptrLoad;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
};


void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case:
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}
} // namespace

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

PreservedAnalyses ModuleThreadSanitizerPass::run(Module &M,
                                                 ModuleAnalysisManager &MAM) {
  // Return early if nosanitize_thread module flag is present for the module.
  if (checkIfAlreadyInstrumented(M, "nosanitize_thread"))
    return PreservedAnalyses::all();
  insertModuleCtor(M);
  return PreservedAnalyses::none();
}
void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
  const DataLayout &DL = M.getDataLayout();
  LLVMContext &Ctx = M.getContext();
  IntptrTy = DL.getIntPtrType(Ctx);

  IRBuilder<> IRB(Ctx);
  AttributeList Attr;
  Attr = Attr.addFnAttribute(Ctx, Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getPtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getPtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getPtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    Type *Ty = Type::getIntNTy(Ctx, BitSize);
    Type *PtrTy = PointerType::get(Ctx, 0);
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName,
                              TLI.getAttrList(&Ctx, {1}, /*Signed=*/true,
                                              /*Ret=*/BitSize <= 32, Attr),
                              Ty, PtrTy, OrdTy);

    // Args of type Ty need extension only when BitSize is 32 or less.
    using Idxs = std::vector<unsigned>;
    Idxs Idxs2Or12((BitSize <= 32) ? Idxs({1, 2}) : Idxs({2}));
    Idxs Idxs34Or1234((BitSize <= 32) ? Idxs({1, 2, 3, 4}) : Idxs({3, 4}));
    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName,
        TLI.getAttrList(&Ctx, Idxs2Or12, /*Signed=*/true, /*Ret=*/false, Attr),
        IRB.getVoidTy(), PtrTy, Ty, OrdTy);

    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[Op][i] = M.getOrInsertFunction(
          RMWName,
          TLI.getAttrList(&Ctx, Idxs2Or12, /*Signed=*/true,
                          /*Ret=*/BitSize <= 32, Attr),
          Ty, PtrTy, Ty, OrdTy);
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = M.getOrInsertFunction(
        AtomicCASName,
        TLI.getAttrList(&Ctx, Idxs34Or1234, /*Signed=*/true,
                        /*Ret=*/BitSize <= 32, Attr),
        Ty, PtrTy, Ty, Ty, OrdTy, OrdTy);
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getPtrTy(), IRB.getPtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getPtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction(
      "__tsan_atomic_thread_fence",
      TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getVoidTy(), OrdTy);

  TsanAtomicSignalFence = M.getOrInsertFunction(
      "__tsan_atomic_signal_fence",
      TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getVoidTy(), OrdTy);

  MemmoveFn =
      M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getPtrTy(),
                            IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getPtrTy(),
                            IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
  MemsetFn = M.getOrInsertFunction(
      "__tsan_memset",
      TLI.getAttrList(&Ctx, {1}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getPtrTy(), IRB.getPtrTy(), IRB.getInt32Ty(), IntptrTy);
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
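// For example, profile counter updates emitted for -fprofile-generate live in
// the counters section (e.g. "__llvm_prf_cnts" on ELF) and are incremented
// non-atomically by design; instrumenting them would only report noise.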
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = M->getTargetTriple().getObjectFormat();
      if (SectionName.ends_with(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
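//
// For example (illustrative): in "x += 1" the load of x is immediately
// followed by a store to x in the same BB. By default the read
// instrumentation is dropped and only a __tsan_write4 call is emitted; with
// -tsan-compound-read-before-write the pair is instead reported through a
// single __tsan_read_write4 call.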
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<InstructionInfo> &All,
    const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();

    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
      continue;

    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and if either the read or write
        // is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // We will write to this temp, so no reason to analyze the read.
          // Mark the write instruction as compound.
          WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }

      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }

    auto *AI = findAllocaForValue(Addr);
    // Instead of Addr, we should check whether its base pointer is captured.
    if (AI && !PointerMayBeCaptured(AI, /*ReturnCaptures=*/true) &&
        ClOmitNonCaptured) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }

    // Instrument this instruction.
    All.emplace_back(I);
    if (IsWrite) {
      // For read-before-write and compound instrumentation we only need one
      // write target, and we can override any previous entry if it exists.
      WriteTargets[Addr] = All.size() - 1;
    }
  }
  Local.clear();
}

static bool isTsanAtomic(const Instruction *I) {
  // TODO: Ask TTI whether synchronization scope is between threads.
  auto SSID = getAtomicSyncScopeID(I);
  if (!SSID)
    return false;
  if (isa<LoadInst>(I) || isa<StoreInst>(I))
    return *SSID != SyncScope::SingleThread;
  return true;
}

void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  InstrumentationIRBuilder IRB(&F.getEntryBlock(),
                               F.getEntryBlock().getFirstNonPHIIt());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    InstrumentationIRBuilder::ensureDebugInfo(*AtExit, F);
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}

bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting call to __tsan_init from within
  // the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions can not have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them at
  // all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;

  // __attribute__(disable_sanitizer_instrumentation) prevents all kinds of
  // instrumentation.
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  initialize(*F.getParent(), TLI);
  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      if (isTsanAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(II, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto *Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto *Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
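  // For example (illustrative IR): the entry block gains
  //   %ra = call ptr @llvm.returnaddress(i32 0)
  //   call void @__tsan_func_entry(ptr %ra)
  // and every escape point found by EscapeEnumerator gains a matching
  //   call void @__tsan_func_exit()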
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    InstrumentationIRBuilder IRB(&F.getEntryBlock(),
                                 F.getEntryBlock().getFirstNonPHIIt());
    Value *ReturnAddress =
        IRB.CreateIntrinsic(Intrinsic::returnaddress, IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      InstrumentationIRBuilder::ensureDebugInfo(*AtExit, F);
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  InstrumentationIRBuilder IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
  Type *OrigTy = getLoadStoreType(II.Inst);

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getPtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate, {Addr, StoredValue});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad, Addr);
    NumInstrumentedVtableReads++;
    return true;
  }

  const Align Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlign()
                                  : cast<LoadInst>(II.Inst)->getAlign();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  if (Alignment >= Align(8) || (Alignment.value() % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, Addr);
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}

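// Maps an LLVM atomic ordering to the integer constant the tsan runtime
// expects; the encoding follows the C/C++ memory_order enumerators
// (relaxed = 0, consume = 1, acquire = 2, release = 3, acq_rel = 4,
// seq_cst = 5).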
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
  case AtomicOrdering::NotAtomic:
    llvm_unreachable("unexpected atomic ordering!");
  case AtomicOrdering::Unordered: [[fallthrough]];
  case AtomicOrdering::Monotonic: v = 0; break;
  // Not specified yet:
  // case AtomicOrdering::Consume: v = 1; break;
  case AtomicOrdering::Acquire: v = 2; break;
  case AtomicOrdering::Release: v = 3; break;
  case AtomicOrdering::AcquireRelease: v = 4; break;
  case AtomicOrdering::SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
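// For example (illustrative): a call to
//   @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)
// is replaced with
//   call ptr @__tsan_memset(ptr %p, i32 0, i64 %n)
// which the runtime intercepts like an ordinary libc memset.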
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  InstrumentationIRBuilder IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    Value *Cast1 = IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false);
    Value *Cast2 = IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false);
    IRB.CreateCall(MemsetFn, {M->getArgOperand(0), Cast1, Cast2});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {M->getArgOperand(0), M->getArgOperand(1),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
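//
// For example (illustrative): "%v = load atomic i32, ptr %p acquire" becomes
//
//   %v = call i32 @__tsan_atomic32_load(ptr %p, i32 2)
//
// where 2 is the memory_order_acquire encoding produced by createOrdering().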

bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  InstrumentationIRBuilder IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    Type *OrigTy = LI->getType();
    int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
    if (Idx < 0)
      return false;
    Value *Args[] = {Addr,
                     createOrdering(&IRB, LI->getOrdering())};
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
    I->eraseFromParent();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Value *Args[] = {Addr,
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    IRB.CreateCall(TsanAtomicStore[Idx], Args);
    SI->eraseFromParent();
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Value *Val = RMWI->getValOperand();
    Value *Args[] = {Addr, IRB.CreateBitOrPointerCast(Val, Ty),
                     createOrdering(&IRB, RMWI->getOrdering())};
    Value *C = IRB.CreateCall(F, Args);
    I->replaceAllUsesWith(IRB.CreateBitOrPointerCast(C, Val->getType()));
    I->eraseFromParent();
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {Addr,
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
        IRB.CreateInsertValue(PoisonValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    IRB.CreateCall(F, Args);
    FI->eraseFromParent();
  }
  return true;
}

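// Returns the index of the runtime callback matching the access size, i.e.
// log2 of the size in bytes. For example (illustrative), an i32 access has a
// store size of 32 bits, so Idx = countr_zero(32 / 8) = 2 and the "4"-suffixed
// callbacks such as __tsan_read4 are used. Unusual or scalable sizes get -1.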
int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
                                              const DataLayout &DL) {
  assert(OrigTy->isSized());
  if (OrigTy->isScalableTy()) {
    // FIXME: support vscale.
    return -1;
  }
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = llvm::countr_zero(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}