184#include "llvm/IR/IntrinsicsAArch64.h"
185#include "llvm/IR/IntrinsicsX86.h"
215#define DEBUG_TYPE "msan"
218 "Controls which checks to insert");
221 "Controls which instruction to instrument");
239 "msan-track-origins",
244 cl::desc(
"keep going after reporting a UMR"),
253 "msan-poison-stack-with-call",
258 "msan-poison-stack-pattern",
259 cl::desc(
"poison uninitialized stack variables with the given pattern"),
264 cl::desc(
"Print name of local stack variable"),
273 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
278 cl::desc(
"exact handling of relational integer ICmp"),
282 "msan-handle-lifetime-intrinsics",
284 "when possible, poison scoped variables at the beginning of the scope "
285 "(slower, but more precise)"),
296 "msan-handle-asm-conservative",
307 "msan-check-access-address",
308 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
313 cl::desc(
"check arguments and return values at function call boundaries"),
317 "msan-dump-strict-instructions",
318 cl::desc(
"print out instructions with default strict semantics"),
322 "msan-dump-strict-intrinsics",
323 cl::desc(
"Prints 'unknown' intrinsics that were handled heuristically. "
324 "Use -msan-dump-strict-instructions to print intrinsics that "
325 "could not be handled exactly nor heuristically."),
329 "msan-instrumentation-with-call-threshold",
331 "If the function being instrumented requires more than "
332 "this number of checks and origin stores, use callbacks instead of "
333 "inline checks (-1 means never use callbacks)."),
338 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
348 cl::desc(
"Insert checks for constant shadow values"),
355 cl::desc(
"Place MSan constructors in comdat sections"),
361 cl::desc(
"Define custom MSan AndMask"),
365 cl::desc(
"Define custom MSan XorMask"),
369 cl::desc(
"Define custom MSan ShadowBase"),
373 cl::desc(
"Define custom MSan OriginBase"),
378 cl::desc(
"Define threshold for number of checks per "
379 "debug location to force origin update."),
391struct MemoryMapParams {
398struct PlatformMemoryMapParams {
399 const MemoryMapParams *bits32;
400 const MemoryMapParams *bits64;
562class MemorySanitizer {
571 MemorySanitizer(MemorySanitizer &&) =
delete;
572 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
573 MemorySanitizer(
const MemorySanitizer &) =
delete;
574 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
579 friend struct MemorySanitizerVisitor;
580 friend struct VarArgHelperBase;
581 friend struct VarArgAMD64Helper;
582 friend struct VarArgAArch64Helper;
583 friend struct VarArgPowerPCHelper;
584 friend struct VarArgSystemZHelper;
585 friend struct VarArgI386Helper;
586 friend struct VarArgGenericHelper;
588 void initializeModule(
Module &M);
593 template <
typename... ArgsTy>
620 Value *ParamOriginTLS;
626 Value *RetvalOriginTLS;
632 Value *VAArgOriginTLS;
635 Value *VAArgOverflowSizeTLS;
638 bool CallbacksInitialized =
false;
683 Value *MsanMetadataAlloca;
689 const MemoryMapParams *MapParams;
693 MemoryMapParams CustomMapParams;
698 MDNode *OriginStoreWeights;
701void insertModuleCtor(
Module &M) {
729 Recover(getOptOrDefault(
ClKeepGoing, Kernel || R)),
747 MemorySanitizer Msan(*
F.getParent(),
Options);
766 OS, MapClassName2PassName);
773 OS <<
"eager-checks;";
774 OS <<
"track-origins=" <<
Options.TrackOrigins;
790template <
typename... ArgsTy>
797 std::forward<ArgsTy>(Args)...);
800 return M.getOrInsertFunction(
Name, MsanMetadata,
801 std::forward<ArgsTy>(Args)...);
810 RetvalOriginTLS =
nullptr;
812 ParamOriginTLS =
nullptr;
814 VAArgOriginTLS =
nullptr;
815 VAArgOverflowSizeTLS =
nullptr;
817 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
819 IRB.getVoidTy(), IRB.getInt32Ty());
830 MsanGetContextStateFn =
831 M.getOrInsertFunction(
"__msan_get_context_state", PtrTy);
835 for (
int ind = 0, size = 1; ind < 4; ind++,
size <<= 1) {
836 std::string name_load =
837 "__msan_metadata_ptr_for_load_" + std::to_string(size);
838 std::string name_store =
839 "__msan_metadata_ptr_for_store_" + std::to_string(size);
840 MsanMetadataPtrForLoad_1_8[ind] =
841 getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
842 MsanMetadataPtrForStore_1_8[ind] =
843 getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
846 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
847 M,
"__msan_metadata_ptr_for_load_n", PtrTy, IRB.getInt64Ty());
848 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
849 M,
"__msan_metadata_ptr_for_store_n", PtrTy, IRB.getInt64Ty());
852 MsanPoisonAllocaFn =
M.getOrInsertFunction(
853 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
854 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
855 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
859 return M.getOrInsertGlobal(
Name, Ty, [&] {
861 nullptr,
Name,
nullptr,
867void MemorySanitizer::createUserspaceApi(
Module &M,
875 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
876 :
"__msan_warning_with_origin_noreturn";
877 WarningFn =
M.getOrInsertFunction(WarningFnName,
879 IRB.getVoidTy(), IRB.getInt32Ty());
882 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
883 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
909 VAArgOverflowSizeTLS =
914 unsigned AccessSize = 1 << AccessSizeIndex;
915 std::string FunctionName =
"__msan_maybe_warning_" + itostr(AccessSize);
916 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
918 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
920 FunctionName =
"__msan_maybe_store_origin_" + itostr(AccessSize);
921 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
923 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
927 MsanSetAllocaOriginWithDescriptionFn =
928 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
929 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
930 MsanSetAllocaOriginNoDescriptionFn =
931 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
932 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
933 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
934 IRB.getVoidTy(), PtrTy, IntptrTy);
938void MemorySanitizer::initializeCallbacks(
Module &M,
941 if (CallbacksInitialized)
947 MsanChainOriginFn =
M.getOrInsertFunction(
948 "__msan_chain_origin",
951 MsanSetOriginFn =
M.getOrInsertFunction(
953 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
955 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
957 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
958 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
960 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
962 MsanInstrumentAsmStoreFn =
M.getOrInsertFunction(
963 "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
966 createKernelApi(M, TLI);
968 createUserspaceApi(M, TLI);
970 CallbacksInitialized =
true;
976 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
994void MemorySanitizer::initializeModule(
Module &M) {
995 auto &
DL =
M.getDataLayout();
997 TargetTriple =
Triple(
M.getTargetTriple());
999 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
1000 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
1002 if (ShadowPassed || OriginPassed) {
1007 MapParams = &CustomMapParams;
1009 switch (TargetTriple.getOS()) {
1011 switch (TargetTriple.getArch()) {
1026 switch (TargetTriple.getArch()) {
1035 switch (TargetTriple.getArch()) {
1069 C = &(
M.getContext());
1071 IntptrTy = IRB.getIntPtrTy(
DL);
1072 OriginTy = IRB.getInt32Ty();
1073 PtrTy = IRB.getPtrTy();
1078 if (!CompileKernel) {
1080 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1081 return new GlobalVariable(
1082 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1083 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1087 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1088 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1089 GlobalValue::WeakODRLinkage,
1090 IRB.getInt32(Recover),
"__msan_keep_going");
1105struct VarArgHelper {
1106 virtual ~VarArgHelper() =
default;
1121 virtual void finalizeInstrumentation() = 0;
1124struct MemorySanitizerVisitor;
1129 MemorySanitizerVisitor &Visitor);
1136 if (TypeSizeFixed <= 8)
1145class NextNodeIRBuilder :
public IRBuilder<> {
1158struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1160 MemorySanitizer &MS;
1163 std::unique_ptr<VarArgHelper> VAHelper;
1171 bool PropagateShadow;
1175 struct ShadowOriginAndInsertPoint {
1181 : Shadow(S), Origin(
O), OrigIns(
I) {}
1189 int64_t SplittableBlocksCount = 0;
1191 MemorySanitizerVisitor(
Function &
F, MemorySanitizer &MS,
1194 bool SanitizeFunction =
1196 InsertChecks = SanitizeFunction;
1197 PropagateShadow = SanitizeFunction;
1207 MS.initializeCallbacks(*
F.getParent(), TLI);
1209 IRBuilder<>(&
F.getEntryBlock(),
F.getEntryBlock().getFirstNonPHIIt())
1212 if (MS.CompileKernel) {
1214 insertKmsanPrologue(IRB);
1218 <<
"MemorySanitizer is not inserting checks into '"
1219 <<
F.getName() <<
"'\n");
1222 bool instrumentWithCalls(
Value *V) {
1224 if (isa<Constant>(V))
1227 ++SplittableBlocksCount;
1233 return I.getParent() == FnPrologueEnd->
getParent() &&
1234 (&
I == FnPrologueEnd ||
I.comesBefore(FnPrologueEnd));
1242 if (MS.TrackOrigins <= 1)
1244 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1249 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1261 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1262 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1274 auto [InsertPt,
Index] =
1286 Align CurrentAlignment = Alignment;
1287 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1288 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1290 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1295 CurrentAlignment = IntptrAlignment;
1313 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1314 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1322 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1331 if (instrumentWithCalls(ConvertedShadow) &&
1334 Value *ConvertedShadow2 =
1340 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1344 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1349 void materializeStores() {
1352 Value *Val =
SI->getValueOperand();
1354 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1355 Value *ShadowPtr, *OriginPtr;
1357 const Align Alignment =
SI->getAlign();
1359 std::tie(ShadowPtr, OriginPtr) =
1360 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
true);
1369 if (MS.TrackOrigins && !
SI->isAtomic())
1370 storeOrigin(IRB,
Addr, Shadow, getOrigin(Val), OriginPtr,
1377 if (MS.TrackOrigins < 2)
1380 if (LazyWarningDebugLocationCount.
empty())
1381 for (
const auto &
I : InstrumentationList)
1382 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1396 if (
Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1398 auto NewDebugLoc = OI->getDebugLoc();
1405 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1406 Origin = updateOrigin(Origin, IRBOrigin);
1411 if (MS.CompileKernel || MS.TrackOrigins)
1425 if (instrumentWithCalls(ConvertedShadow) &&
1429 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1430 Value *ConvertedShadow2 =
1433 Fn, {ConvertedShadow2,
1434 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1438 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1441 !MS.Recover, MS.ColdCallWeights);
1444 insertWarningFn(IRB, Origin);
1449 void materializeInstructionChecks(
1454 bool Combine = !MS.TrackOrigins;
1456 Value *Shadow =
nullptr;
1457 for (
const auto &ShadowData : InstructionChecks) {
1461 Value *ConvertedShadow = ShadowData.Shadow;
1463 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1470 insertWarningFn(IRB, ShadowData.Origin);
1480 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1485 Shadow = ConvertedShadow;
1489 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1490 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1491 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1497 materializeOneCheck(IRB, Shadow,
nullptr);
1501 void materializeChecks() {
1507 for (
auto I = InstrumentationList.begin();
1508 I != InstrumentationList.end();) {
1509 auto OrigIns =
I->OrigIns;
1513 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1514 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1515 return OrigIns != R.OrigIns;
1529 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1530 {Zero, IRB.getInt32(0)},
"param_shadow");
1531 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1532 {Zero, IRB.getInt32(1)},
"retval_shadow");
1533 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1534 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1535 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1536 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1537 MS.VAArgOverflowSizeTLS =
1538 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1539 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1540 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1541 {Zero, IRB.getInt32(5)},
"param_origin");
1542 MS.RetvalOriginTLS =
1543 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1544 {Zero, IRB.getInt32(6)},
"retval_origin");
1546 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1563 for (
PHINode *PN : ShadowPHINodes) {
1564 PHINode *PNS = cast<PHINode>(getShadow(PN));
1565 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1566 size_t NumValues = PN->getNumIncomingValues();
1567 for (
size_t v = 0;
v < NumValues;
v++) {
1568 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1570 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1574 VAHelper->finalizeInstrumentation();
1578 if (InstrumentLifetimeStart) {
1579 for (
auto Item : LifetimeStartList) {
1580 instrumentAlloca(*Item.second, Item.first);
1581 AllocaSet.
remove(Item.second);
1587 instrumentAlloca(*AI);
1590 materializeChecks();
1594 materializeStores();
1600 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1612 if (
VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1613 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1615 VT->getElementCount());
1617 if (
ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1618 return ArrayType::get(getShadowTy(AT->getElementType()),
1619 AT->getNumElements());
1621 if (
StructType *ST = dyn_cast<StructType>(OrigTy)) {
1623 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1624 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1626 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1642 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1644 if (Aggregator != FalseVal)
1645 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1647 Aggregator = ShadowBool;
1656 if (!
Array->getNumElements())
1660 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1664 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1665 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1675 return collapseStructShadow(
Struct, V, IRB);
1676 if (
ArrayType *Array = dyn_cast<ArrayType>(
V->getType()))
1677 return collapseArrayShadow(Array, V, IRB);
1678 if (isa<VectorType>(
V->getType())) {
1679 if (isa<ScalableVectorType>(
V->getType()))
1682 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1690 Type *VTy =
V->getType();
1692 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1699 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1700 if (
VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1701 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1702 VectTy->getElementCount());
1708 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1709 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1710 return VectorType::get(
1711 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1712 VectTy->getElementCount());
1714 assert(IntPtrTy == MS.IntptrTy);
1719 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1721 VectTy->getElementCount(),
1722 constToIntPtr(VectTy->getElementType(),
C));
1724 assert(IntPtrTy == MS.IntptrTy);
1725 return ConstantInt::get(MS.IntptrTy,
C);
1738 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1741 if (
uint64_t AndMask = MS.MapParams->AndMask)
1742 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1744 if (
uint64_t XorMask = MS.MapParams->XorMask)
1745 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1757 std::pair<Value *, Value *>
1764 assert(VectTy->getElementType()->isPointerTy());
1766 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1767 Value *ShadowOffset = getShadowPtrOffset(
Addr, IRB);
1768 Value *ShadowLong = ShadowOffset;
1769 if (
uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1771 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1774 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1776 Value *OriginPtr =
nullptr;
1777 if (MS.TrackOrigins) {
1778 Value *OriginLong = ShadowOffset;
1779 uint64_t OriginBase = MS.MapParams->OriginBase;
1780 if (OriginBase != 0)
1782 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1785 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1788 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1790 return std::make_pair(ShadowPtr, OriginPtr);
1793 template <
typename... ArgsTy>
1798 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1799 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1802 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1805 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *
Addr,
1809 Value *ShadowOriginPtrs;
1816 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1818 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1819 ShadowOriginPtrs = createMetadataCall(
1821 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1828 return std::make_pair(ShadowPtr, OriginPtr);
1834 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *
Addr,
1841 return getShadowOriginPtrKernelNoVec(
Addr, IRB, ShadowTy,
isStore);
1845 unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1846 Value *ShadowPtrs = ConstantInt::getNullValue(
1848 Value *OriginPtrs =
nullptr;
1849 if (MS.TrackOrigins)
1850 OriginPtrs = ConstantInt::getNullValue(
1852 for (
unsigned i = 0; i < NumElements; ++i) {
1855 auto [ShadowPtr, OriginPtr] =
1856 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1859 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1860 if (MS.TrackOrigins)
1862 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1864 return {ShadowPtrs, OriginPtrs};
1871 if (MS.CompileKernel)
1872 return getShadowOriginPtrKernel(
Addr, IRB, ShadowTy,
isStore);
1873 return getShadowOriginPtrUserspace(
Addr, IRB, ShadowTy, Alignment);
1888 if (!MS.TrackOrigins)
1902 Value *getOriginPtrForRetval() {
1904 return MS.RetvalOriginTLS;
1909 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1910 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1915 if (!MS.TrackOrigins)
1917 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1918 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1919 OriginMap[
V] = Origin;
1923 Type *ShadowTy = getShadowTy(OrigTy);
1933 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
1938 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1940 if (
ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1942 getPoisonedShadow(AT->getElementType()));
1945 if (
StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1947 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1948 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1956 Type *ShadowTy = getShadowTy(V);
1959 return getPoisonedShadow(ShadowTy);
1971 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
1972 return getCleanShadow(V);
1974 Value *Shadow = ShadowMap[
V];
1976 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
1978 assert(Shadow &&
"No shadow for a value");
1982 if (
UndefValue *U = dyn_cast<UndefValue>(V)) {
1983 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1984 : getCleanShadow(V);
1989 if (
Argument *
A = dyn_cast<Argument>(V)) {
1991 Value *&ShadowPtr = ShadowMap[
V];
1996 unsigned ArgOffset = 0;
1998 for (
auto &FArg :
F->args()) {
1999 if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
2001 ?
"vscale not fully supported\n"
2002 :
"Arg is not sized\n"));
2004 ShadowPtr = getCleanShadow(V);
2005 setOrigin(
A, getCleanOrigin());
2011 unsigned Size = FArg.hasByValAttr()
2012 ?
DL.getTypeAllocSize(FArg.getParamByValType())
2013 :
DL.getTypeAllocSize(FArg.getType());
2017 if (FArg.hasByValAttr()) {
2021 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
2022 FArg.getParamAlign(), FArg.getParamByValType());
2023 Value *CpShadowPtr, *CpOriginPtr;
2024 std::tie(CpShadowPtr, CpOriginPtr) =
2025 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2027 if (!PropagateShadow || Overflow) {
2029 EntryIRB.CreateMemSet(
2033 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2035 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign,
Base,
2040 if (MS.TrackOrigins) {
2041 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2045 EntryIRB.CreateMemCpy(
2054 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2055 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2056 ShadowPtr = getCleanShadow(V);
2057 setOrigin(
A, getCleanOrigin());
2060 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2061 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2063 if (MS.TrackOrigins) {
2064 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2065 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2069 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2075 assert(ShadowPtr &&
"Could not find shadow for an argument");
2079 return getCleanShadow(V);
2084 return getShadow(
I->getOperand(i));
2089 if (!MS.TrackOrigins)
2091 if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2092 return getCleanOrigin();
2093 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2094 "Unexpected value type in getOrigin()");
2096 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2097 return getCleanOrigin();
2099 Value *Origin = OriginMap[
V];
2100 assert(Origin &&
"Missing origin");
2106 return getOrigin(
I->getOperand(i));
2119 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2120 << *OrigIns <<
"\n");
2125 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2126 isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2127 "Can only insert checks for integer, vector, and aggregate shadow "
2130 InstrumentationList.push_back(
2131 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2140 Value *Shadow, *Origin;
2142 Shadow = getShadow(Val);
2145 Origin = getOrigin(Val);
2147 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2150 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2152 insertShadowCheck(Shadow, Origin, OrigIns);
2157 case AtomicOrdering::NotAtomic:
2158 return AtomicOrdering::NotAtomic;
2159 case AtomicOrdering::Unordered:
2160 case AtomicOrdering::Monotonic:
2161 case AtomicOrdering::Release:
2162 return AtomicOrdering::Release;
2163 case AtomicOrdering::Acquire:
2164 case AtomicOrdering::AcquireRelease:
2165 return AtomicOrdering::AcquireRelease;
2166 case AtomicOrdering::SequentiallyConsistent:
2167 return AtomicOrdering::SequentiallyConsistent;
2173 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2174 uint32_t OrderingTable[NumOrderings] = {};
2176 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2177 OrderingTable[(
int)AtomicOrderingCABI::release] =
2178 (int)AtomicOrderingCABI::release;
2179 OrderingTable[(int)AtomicOrderingCABI::consume] =
2180 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2181 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2182 (
int)AtomicOrderingCABI::acq_rel;
2183 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2184 (
int)AtomicOrderingCABI::seq_cst;
2191 case AtomicOrdering::NotAtomic:
2192 return AtomicOrdering::NotAtomic;
2193 case AtomicOrdering::Unordered:
2194 case AtomicOrdering::Monotonic:
2195 case AtomicOrdering::Acquire:
2196 return AtomicOrdering::Acquire;
2197 case AtomicOrdering::Release:
2198 case AtomicOrdering::AcquireRelease:
2199 return AtomicOrdering::AcquireRelease;
2200 case AtomicOrdering::SequentiallyConsistent:
2201 return AtomicOrdering::SequentiallyConsistent;
2207 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2208 uint32_t OrderingTable[NumOrderings] = {};
2210 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2211 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2212 OrderingTable[(int)AtomicOrderingCABI::consume] =
2213 (
int)AtomicOrderingCABI::acquire;
2214 OrderingTable[(int)AtomicOrderingCABI::release] =
2215 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2216 (int)AtomicOrderingCABI::acq_rel;
2217 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2218 (
int)AtomicOrderingCABI::seq_cst;
2226 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2229 if (isInPrologue(
I))
2234 setShadow(&
I, getCleanShadow(&
I));
2235 setOrigin(&
I, getCleanOrigin());
2247 assert(
I.getType()->isSized() &&
"Load type must have size");
2248 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2249 NextNodeIRBuilder IRB(&
I);
2250 Type *ShadowTy = getShadowTy(&
I);
2252 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2253 const Align Alignment =
I.getAlign();
2254 if (PropagateShadow) {
2255 std::tie(ShadowPtr, OriginPtr) =
2256 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2260 setShadow(&
I, getCleanShadow(&
I));
2264 insertShadowCheck(
I.getPointerOperand(), &
I);
2269 if (MS.TrackOrigins) {
2270 if (PropagateShadow) {
2275 setOrigin(&
I, getCleanOrigin());
2285 StoreList.push_back(&
I);
2287 insertShadowCheck(
I.getPointerOperand(), &
I);
2291 assert(isa<AtomicRMWInst>(
I) || isa<AtomicCmpXchgInst>(
I));
2295 Value *Val =
I.getOperand(1);
2296 Value *ShadowPtr = getShadowOriginPtr(
Addr, IRB, getShadowTy(Val),
Align(1),
2301 insertShadowCheck(
Addr, &
I);
2306 if (isa<AtomicCmpXchgInst>(
I))
2307 insertShadowCheck(Val, &
I);
2311 setShadow(&
I, getCleanShadow(&
I));
2312 setOrigin(&
I, getCleanOrigin());
2327 insertShadowCheck(
I.getOperand(1), &
I);
2331 setOrigin(&
I, getOrigin(&
I, 0));
2335 insertShadowCheck(
I.getOperand(2), &
I);
2337 auto *Shadow0 = getShadow(&
I, 0);
2338 auto *Shadow1 = getShadow(&
I, 1);
2341 setOriginForNaryOp(
I);
2346 auto *Shadow0 = getShadow(&
I, 0);
2347 auto *Shadow1 = getShadow(&
I, 1);
2350 setOriginForNaryOp(
I);
2356 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2357 setOrigin(&
I, getOrigin(&
I, 0));
2362 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2363 setOrigin(&
I, getOrigin(&
I, 0));
2368 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2369 setOrigin(&
I, getOrigin(&
I, 0));
2376 if (
auto *CI = dyn_cast<CallInst>(
I.getOperand(0)))
2377 if (CI->isMustTailCall())
2381 setOrigin(&
I, getOrigin(&
I, 0));
2387 "_msprop_ptrtoint"));
2388 setOrigin(&
I, getOrigin(&
I, 0));
2394 "_msprop_inttoptr"));
2395 setOrigin(&
I, getOrigin(&
I, 0));
2398 void visitFPToSIInst(
CastInst &
I) { handleShadowOr(
I); }
2399 void visitFPToUIInst(
CastInst &
I) { handleShadowOr(
I); }
2400 void visitSIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2401 void visitUIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2402 void visitFPExtInst(
CastInst &
I) { handleShadowOr(
I); }
2403 void visitFPTruncInst(
CastInst &
I) { handleShadowOr(
I); }
2418 Value *S2 = getShadow(&
I, 1);
2419 Value *V1 =
I.getOperand(0);
2428 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2429 setOriginForNaryOp(
I);
2440 Value *S2 = getShadow(&
I, 1);
2450 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2451 setOriginForNaryOp(
I);
2469 template <
bool CombineShadow>
class Combiner {
2470 Value *Shadow =
nullptr;
2471 Value *Origin =
nullptr;
2473 MemorySanitizerVisitor *MSV;
2477 : IRB(IRB), MSV(MSV) {}
2481 if (CombineShadow) {
2486 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2487 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2491 if (MSV->MS.TrackOrigins) {
2496 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2498 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2499 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2509 Value *OpShadow = MSV->getShadow(V);
2510 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2511 return Add(OpShadow, OpOrigin);
2517 if (CombineShadow) {
2519 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2520 MSV->setShadow(
I, Shadow);
2522 if (MSV->MS.TrackOrigins) {
2524 MSV->setOrigin(
I, Origin);
2531 if (MSV->MS.TrackOrigins) {
2543 if (!MS.TrackOrigins)
2546 OriginCombiner
OC(
this, IRB);
2547 for (
Use &
Op :
I.operands())
2552 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2554 "Vector of pointers is not a valid shadow type");
2555 return Ty->
isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2564 Type *srcTy =
V->getType();
2567 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2568 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2569 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2575 cast<VectorType>(dstTy)->getElementCount() ==
2576 cast<VectorType>(srcTy)->getElementCount())
2587 Type *ShadowTy = getShadowTy(V);
2588 if (
V->getType() == ShadowTy)
2590 if (
V->getType()->isPtrOrPtrVectorTy())
2599 ShadowAndOriginCombiner
SC(
this, IRB);
2600 for (
Use &
Op :
I.operands())
2620 if (
auto *VTy = dyn_cast<VectorType>(Ty)) {
2621 unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2622 Type *EltTy = VTy->getElementType();
2624 for (
unsigned Idx = 0;
Idx < NumElements; ++
Idx) {
2627 const APInt &
V = Elt->getValue();
2629 Elements.push_back(ConstantInt::get(EltTy, V2));
2631 Elements.push_back(ConstantInt::get(EltTy, 1));
2636 if (
ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2637 const APInt &
V = Elt->getValue();
2639 ShadowMul = ConstantInt::get(Ty, V2);
2641 ShadowMul = ConstantInt::get(Ty, 1);
2647 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2648 setOrigin(&
I, getOrigin(OtherArg));
2652 Constant *constOp0 = dyn_cast<Constant>(
I.getOperand(0));
2653 Constant *constOp1 = dyn_cast<Constant>(
I.getOperand(1));
2654 if (constOp0 && !constOp1)
2655 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2656 else if (constOp1 && !constOp0)
2657 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2672 insertShadowCheck(
I.getOperand(1), &
I);
2673 setShadow(&
I, getShadow(&
I, 0));
2674 setOrigin(&
I, getOrigin(&
I, 0));
2691 void handleEqualityComparison(
ICmpInst &
I) {
2695 Value *Sa = getShadow(
A);
2696 Value *Sb = getShadow(
B);
2722 setOriginForNaryOp(
I);
2730 void handleRelationalComparisonExact(
ICmpInst &
I) {
2734 Value *Sa = getShadow(
A);
2735 Value *Sb = getShadow(
B);
2746 bool IsSigned =
I.isSigned();
2748 auto GetMinMaxUnsigned = [&](
Value *
V,
Value *S) {
2758 V = IRB.
CreateXor(V, ConstantInt::get(
V->getType(), MinVal));
2763 return std::make_pair(Min, Max);
2766 auto [Amin, Amax] = GetMinMaxUnsigned(
A, Sa);
2767 auto [Bmin, Bmax] = GetMinMaxUnsigned(
B, Sb);
2773 setOriginForNaryOp(
I);
2780 void handleSignedRelationalComparison(
ICmpInst &
I) {
2784 if ((constOp = dyn_cast<Constant>(
I.getOperand(1)))) {
2785 op =
I.getOperand(0);
2786 pre =
I.getPredicate();
2787 }
else if ((constOp = dyn_cast<Constant>(
I.getOperand(0)))) {
2788 op =
I.getOperand(1);
2789 pre =
I.getSwappedPredicate();
2802 setShadow(&
I, Shadow);
2803 setOrigin(&
I, getOrigin(
op));
2814 if (
I.isEquality()) {
2815 handleEqualityComparison(
I);
2821 handleRelationalComparisonExact(
I);
2825 handleSignedRelationalComparison(
I);
2830 if ((isa<Constant>(
I.getOperand(0)) || isa<Constant>(
I.getOperand(1)))) {
2831 handleRelationalComparisonExact(
I);
2838 void visitFCmpInst(
FCmpInst &
I) { handleShadowOr(
I); }
2845 Value *S2 = getShadow(&
I, 1);
2850 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2851 setOriginForNaryOp(
I);
2862 Value *S0 = getShadow(&
I, 0);
2864 Value *S2 = getShadow(&
I, 2);
2870 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2871 setOriginForNaryOp(
I);
2885 getShadow(
I.getArgOperand(1));
2888 {I.getArgOperand(0), I.getArgOperand(1),
2889 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2890 I.eraseFromParent();
2908 getShadow(
I.getArgOperand(1));
2911 {I.getArgOperand(0), I.getArgOperand(1),
2912 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2913 I.eraseFromParent();
2921 {I.getArgOperand(0),
2922 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2923 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2924 I.eraseFromParent();
2927 void visitVAStartInst(
VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
2929 void visitVACopyInst(
VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
2938 Value *Shadow = getShadow(&
I, 1);
2939 Value *ShadowPtr, *OriginPtr;
2943 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2948 insertShadowCheck(
Addr, &
I);
2951 if (MS.TrackOrigins)
2964 Type *ShadowTy = getShadowTy(&
I);
2965 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2966 if (PropagateShadow) {
2970 std::tie(ShadowPtr, OriginPtr) =
2971 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2975 setShadow(&
I, getCleanShadow(&
I));
2979 insertShadowCheck(
Addr, &
I);
2981 if (MS.TrackOrigins) {
2982 if (PropagateShadow)
2983 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
2985 setOrigin(&
I, getCleanOrigin());
3000 [[maybe_unused]]
bool
3002 unsigned int trailingFlags) {
3004 if (!(
RetTy->isIntOrIntVectorTy() ||
RetTy->isFPOrFPVectorTy()))
3007 unsigned NumArgOperands =
I.arg_size();
3008 assert(NumArgOperands >= trailingFlags);
3009 for (
unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
3010 Type *Ty =
I.getArgOperand(i)->getType();
3016 ShadowAndOriginCombiner
SC(
this, IRB);
3017 for (
unsigned i = 0; i < NumArgOperands; ++i)
3018 SC.Add(
I.getArgOperand(i));
3035 unsigned NumArgOperands =
I.arg_size();
3036 if (NumArgOperands == 0)
3039 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3040 I.getArgOperand(1)->getType()->isVectorTy() &&
3041 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
3043 return handleVectorStoreIntrinsic(
I);
3046 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3047 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3049 return handleVectorLoadIntrinsic(
I);
3052 if (
I.doesNotAccessMemory())
3053 if (maybeHandleSimpleNomemIntrinsic(
I, 0))
3062 if (handleUnknownIntrinsicUnlogged(
I)) {
3074 setShadow(&
I, getShadow(&
I, 0));
3075 setOrigin(&
I, getOrigin(&
I, 0));
3083 InstrumentLifetimeStart =
false;
3084 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3090 Type *OpType =
Op->getType();
3093 setOrigin(&
I, getOrigin(
Op));
3098 Value *Src =
I.getArgOperand(0);
3104 Constant *IsZeroPoison = cast<Constant>(
I.getOperand(1));
3107 BoolShadow = IRB.
CreateOr(BoolShadow, BoolZeroPoison,
"_mscz_bs");
3110 Value *OutputShadow =
3111 IRB.
CreateSExt(BoolShadow, getShadowTy(Src),
"_mscz_os");
3113 setShadow(&
I, OutputShadow);
3114 setOriginForNaryOp(
I);
3132 void handleVectorConvertIntrinsic(
IntrinsicInst &
I,
int NumUsedElements,
3133 bool HasRoundingMode =
false) {
3135 Value *CopyOp, *ConvertOp;
3137 assert((!HasRoundingMode ||
3138 isa<ConstantInt>(
I.getArgOperand(
I.arg_size() - 1))) &&
3139 "Invalid rounding mode");
3141 switch (
I.arg_size() - HasRoundingMode) {
3143 CopyOp =
I.getArgOperand(0);
3144 ConvertOp =
I.getArgOperand(1);
3147 ConvertOp =
I.getArgOperand(0);
3161 Value *ConvertShadow = getShadow(ConvertOp);
3162 Value *AggShadow =
nullptr;
3165 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3166 for (
int i = 1; i < NumUsedElements; ++i) {
3168 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3169 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3172 AggShadow = ConvertShadow;
3175 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &
I);
3182 Value *ResultShadow = getShadow(CopyOp);
3183 Type *EltTy = cast<VectorType>(ResultShadow->
getType())->getElementType();
3184 for (
int i = 0; i < NumUsedElements; ++i) {
3186 ResultShadow, ConstantInt::getNullValue(EltTy),
3189 setShadow(&
I, ResultShadow);
3190 setOrigin(&
I, getOrigin(CopyOp));
3192 setShadow(&
I, getCleanShadow(&
I));
3193 setOrigin(&
I, getCleanOrigin());
3201 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3204 return CreateShadowCast(IRB, S2,
T,
true);
3212 return CreateShadowCast(IRB, S2,
T,
true);
3229 void handleVectorShiftIntrinsic(
IntrinsicInst &
I,
bool Variable) {
3235 Value *S2 = getShadow(&
I, 1);
3236 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3237 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3238 Value *V1 =
I.getOperand(0);
3241 {IRB.CreateBitCast(S1, V1->getType()), V2});
3243 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3244 setOriginForNaryOp(
I);
3248 Type *getMMXVectorTy(
unsigned EltSizeInBits) {
3249 const unsigned X86_MMXSizeInBits = 64;
3250 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3251 "Illegal MMX vector element size");
3253 X86_MMXSizeInBits / EltSizeInBits);
3260 case Intrinsic::x86_sse2_packsswb_128:
3261 case Intrinsic::x86_sse2_packuswb_128:
3262 return Intrinsic::x86_sse2_packsswb_128;
3264 case Intrinsic::x86_sse2_packssdw_128:
3265 case Intrinsic::x86_sse41_packusdw:
3266 return Intrinsic::x86_sse2_packssdw_128;
3268 case Intrinsic::x86_avx2_packsswb:
3269 case Intrinsic::x86_avx2_packuswb:
3270 return Intrinsic::x86_avx2_packsswb;
3272 case Intrinsic::x86_avx2_packssdw:
3273 case Intrinsic::x86_avx2_packusdw:
3274 return Intrinsic::x86_avx2_packssdw;
3276 case Intrinsic::x86_mmx_packsswb:
3277 case Intrinsic::x86_mmx_packuswb:
3278 return Intrinsic::x86_mmx_packsswb;
3280 case Intrinsic::x86_mmx_packssdw:
3281 return Intrinsic::x86_mmx_packssdw;
3295 unsigned MMXEltSizeInBits = 0) {
3299 Value *S2 = getShadow(&
I, 1);
3300 assert(
S1->getType()->isVectorTy());
3306 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) :
S1->
getType();
3307 if (MMXEltSizeInBits) {
3315 if (MMXEltSizeInBits) {
3321 {}, {S1_ext, S2_ext},
nullptr,
3322 "_msprop_vector_pack");
3323 if (MMXEltSizeInBits)
3326 setOriginForNaryOp(
I);
3330 Constant *createDppMask(
unsigned Width,
unsigned Mask) {
3343 const unsigned Width =
3344 cast<FixedVectorType>(S->
getType())->getNumElements();
3350 Value *DstMaskV = createDppMask(Width, DstMask);
3370 Value *S0 = getShadow(&
I, 0);
3374 const unsigned Width =
3375 cast<FixedVectorType>(S->
getType())->getNumElements();
3376 assert(Width == 2 || Width == 4 || Width == 8);
3378 const unsigned Mask = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3379 const unsigned SrcMask =
Mask >> 4;
3380 const unsigned DstMask =
Mask & 0xf;
3383 Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3388 SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3395 setOriginForNaryOp(
I);
3399 C = CreateAppToShadowCast(IRB,
C);
3413 Value *Sc = getShadow(&
I, 2);
3414 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
3419 C = convertBlendvToSelectMask(IRB,
C);
3420 Sc = convertBlendvToSelectMask(IRB, Sc);
3426 handleSelectLikeInst(
I,
C,
T,
F);
3430 void handleVectorSadIntrinsic(
IntrinsicInst &
I,
bool IsMMX =
false) {
3431 const unsigned SignificantBitsPerResultElement = 16;
3433 unsigned ZeroBitsPerResultElement =
3437 auto *Shadow0 = getShadow(&
I, 0);
3438 auto *Shadow1 = getShadow(&
I, 1);
3443 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3446 setOriginForNaryOp(
I);
3451 unsigned MMXEltSizeInBits = 0) {
3453 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits * 2) :
I.
getType();
3455 auto *Shadow0 = getShadow(&
I, 0);
3456 auto *Shadow1 = getShadow(&
I, 1);
3463 setOriginForNaryOp(
I);
3471 Type *ResTy = getShadowTy(&
I);
3472 auto *Shadow0 = getShadow(&
I, 0);
3473 auto *Shadow1 = getShadow(&
I, 1);
3478 setOriginForNaryOp(
I);
3486 auto *Shadow0 = getShadow(&
I, 0);
3487 auto *Shadow1 = getShadow(&
I, 1);
3489 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
3491 setOriginForNaryOp(
I);
3500 setOrigin(&
I, getOrigin(&
I, 0));
3508 Value *OperandShadow = getShadow(&
I, 0);
3510 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
3518 setOrigin(&
I, getOrigin(&
I, 0));
3526 Value *OperandShadow = getShadow(&
I, 0);
3527 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
3535 setOrigin(&
I, getOrigin(&
I, 0));
3543 getShadowOriginPtr(
Addr, IRB, Ty,
Align(1),
true).first;
3548 insertShadowCheck(
Addr, &
I);
3559 Value *ShadowPtr, *OriginPtr;
3560 std::tie(ShadowPtr, OriginPtr) =
3561 getShadowOriginPtr(
Addr, IRB, Ty, Alignment,
false);
3564 insertShadowCheck(
Addr, &
I);
3567 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
3569 insertShadowCheck(Shadow, Origin, &
I);
3577 Value *PassThru =
I.getArgOperand(2);
3580 insertShadowCheck(
Ptr, &
I);
3581 insertShadowCheck(Mask, &
I);
3584 if (!PropagateShadow) {
3585 setShadow(&
I, getCleanShadow(&
I));
3586 setOrigin(&
I, getCleanOrigin());
3590 Type *ShadowTy = getShadowTy(&
I);
3591 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3592 auto [ShadowPtr, OriginPtr] =
3593 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy,
Align,
false);
3597 getShadow(PassThru),
"_msmaskedexpload");
3599 setShadow(&
I, Shadow);
3602 setOrigin(&
I, getCleanOrigin());
3607 Value *Values =
I.getArgOperand(0);
3613 insertShadowCheck(
Ptr, &
I);
3614 insertShadowCheck(Mask, &
I);
3617 Value *Shadow = getShadow(Values);
3618 Type *ElementShadowTy =
3619 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3620 auto [ShadowPtr, OriginPtrs] =
3621 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy,
Align,
true);
3630 Value *Ptrs =
I.getArgOperand(0);
3631 const Align Alignment(
3632 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3634 Value *PassThru =
I.getArgOperand(3);
3636 Type *PtrsShadowTy = getShadowTy(Ptrs);
3638 insertShadowCheck(Mask, &
I);
3642 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3645 if (!PropagateShadow) {
3646 setShadow(&
I, getCleanShadow(&
I));
3647 setOrigin(&
I, getCleanOrigin());
3651 Type *ShadowTy = getShadowTy(&
I);
3652 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3653 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3654 Ptrs, IRB, ElementShadowTy, Alignment,
false);
3658 getShadow(PassThru),
"_msmaskedgather");
3660 setShadow(&
I, Shadow);
3663 setOrigin(&
I, getCleanOrigin());
3668 Value *Values =
I.getArgOperand(0);
3669 Value *Ptrs =
I.getArgOperand(1);
3670 const Align Alignment(
3671 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3674 Type *PtrsShadowTy = getShadowTy(Ptrs);
3676 insertShadowCheck(Mask, &
I);
3680 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3683 Value *Shadow = getShadow(Values);
3684 Type *ElementShadowTy =
3685 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3686 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3687 Ptrs, IRB, ElementShadowTy, Alignment,
true);
3700 Value *
V =
I.getArgOperand(0);
3702 const Align Alignment(
3703 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3705 Value *Shadow = getShadow(V);
3708 insertShadowCheck(
Ptr, &
I);
3709 insertShadowCheck(Mask, &
I);
3714 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3715 Ptr, IRB, Shadow->
getType(), Alignment,
true);
3719 if (!MS.TrackOrigins)
3722 auto &
DL =
F.getDataLayout();
3723 paintOrigin(IRB, getOrigin(V), OriginPtr,
3735 const Align Alignment(
3736 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3738 Value *PassThru =
I.getArgOperand(3);
3741 insertShadowCheck(
Ptr, &
I);
3742 insertShadowCheck(Mask, &
I);
3745 if (!PropagateShadow) {
3746 setShadow(&
I, getCleanShadow(&
I));
3747 setOrigin(&
I, getCleanOrigin());
3751 Type *ShadowTy = getShadowTy(&
I);
3752 Value *ShadowPtr, *OriginPtr;
3753 std::tie(ShadowPtr, OriginPtr) =
3754 getShadowOriginPtr(
Ptr, IRB, ShadowTy, Alignment,
false);
3756 getShadow(PassThru),
"_msmaskedld"));
3758 if (!MS.TrackOrigins)
3765 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
3770 setOrigin(&
I, Origin);
3789 Value *Dst =
I.getArgOperand(0);
3790 assert(Dst->getType()->isPointerTy() &&
"Destination is not a pointer!");
3793 assert(isa<VectorType>(
Mask->getType()) &&
"Mask is not a vector!");
3795 Value *Src =
I.getArgOperand(2);
3796 assert(isa<VectorType>(Src->getType()) &&
"Source is not a vector!");
3800 Value *SrcShadow = getShadow(Src);
3803 insertShadowCheck(Dst, &
I);
3804 insertShadowCheck(Mask, &
I);
3807 Value *DstShadowPtr;
3808 Value *DstOriginPtr;
3809 std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
3810 Dst, IRB, SrcShadow->
getType(), Alignment,
true);
3813 ShadowArgs.
append(1, DstShadowPtr);
3814 ShadowArgs.
append(1, Mask);
3825 if (!MS.TrackOrigins)
3829 auto &
DL =
F.getDataLayout();
3830 paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
3831 DL.getTypeStoreSize(SrcShadow->
getType()),
3853 Value *Src =
I.getArgOperand(0);
3854 assert(Src->getType()->isPointerTy() &&
"Source is not a pointer!");
3857 assert(isa<VectorType>(
Mask->getType()) &&
"Mask is not a vector!");
3862 insertShadowCheck(Mask, &
I);
3865 Type *SrcShadowTy = getShadowTy(Src);
3866 Value *SrcShadowPtr, *SrcOriginPtr;
3867 std::tie(SrcShadowPtr, SrcOriginPtr) =
3868 getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment,
false);
3871 ShadowArgs.
append(1, SrcShadowPtr);
3872 ShadowArgs.
append(1, Mask);
3882 if (!MS.TrackOrigins)
3889 setOrigin(&
I, PtrSrcOrigin);
3899 Type *ShadowTy = getShadowTy(&
I);
3902 Value *SMask = getShadow(&
I, 1);
3907 {getShadow(&I, 0), I.getOperand(1)});
3910 setOriginForNaryOp(
I);
3915 for (
unsigned X = OddElements ? 1 : 0;
X < Width;
X += 2) {
3932 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3933 assert(isa<ConstantInt>(
I.getArgOperand(2)) &&
3934 "pclmul 3rd operand must be a constant");
3935 unsigned Imm = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3937 getPclmulMask(Width, Imm & 0x01));
3939 getPclmulMask(Width, Imm & 0x10));
3940 ShadowAndOriginCombiner SOC(
this, IRB);
3941 SOC.Add(Shuf0, getOrigin(&
I, 0));
3942 SOC.Add(Shuf1, getOrigin(&
I, 1));
3950 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3952 Value *Second = getShadow(&
I, 1);
3955 Mask.push_back(Width);
3956 for (
unsigned i = 1; i < Width; i++)
3960 setShadow(&
I, Shadow);
3961 setOriginForNaryOp(
I);
3966 Value *Shadow0 = getShadow(&
I, 0);
3967 Value *Shadow1 = getShadow(&
I, 1);
3973 setShadow(&
I, Shadow);
3974 setOriginForNaryOp(
I);
3980 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3982 Value *Second = getShadow(&
I, 1);
3986 Mask.push_back(Width);
3987 for (
unsigned i = 1; i < Width; i++)
3991 setShadow(&
I, Shadow);
3992 setOriginForNaryOp(
I);
3999 assert(
I.getArgOperand(0)->getType() ==
I.getType());
4001 assert(isa<ConstantInt>(
I.getArgOperand(1)));
4004 ShadowAndOriginCombiner
SC(
this, IRB);
4005 SC.Add(
I.getArgOperand(0));
4013 assert(
I.getType()->isIntOrIntVectorTy());
4014 assert(
I.getArgOperand(0)->getType() ==
I.getType());
4018 setShadow(&
I, getShadow(&
I, 0));
4019 setOrigin(&
I, getOrigin(&
I, 0));
4024 Value *Shadow = getShadow(&
I, 0);
4025 setShadow(&
I, IRB.
CreateICmpNE(Shadow, getCleanShadow(Shadow)));
4026 setOrigin(&
I, getOrigin(&
I, 0));
4031 Value *Shadow0 = getShadow(&
I, 0);
4032 Value *Shadow1 = getShadow(&
I, 1);
4035 IRB.
CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
4041 setShadow(&
I, Shadow);
4042 setOriginForNaryOp(
I);
4065 switch (
I.getIntrinsicID()) {
4066 case Intrinsic::x86_sse3_hsub_ps:
4067 shadowIntrinsicID = Intrinsic::x86_sse3_hadd_ps;
4070 case Intrinsic::x86_sse3_hsub_pd:
4071 shadowIntrinsicID = Intrinsic::x86_sse3_hadd_pd;
4074 case Intrinsic::x86_ssse3_phsub_d:
4075 shadowIntrinsicID = Intrinsic::x86_ssse3_phadd_d;
4078 case Intrinsic::x86_ssse3_phsub_d_128:
4079 shadowIntrinsicID = Intrinsic::x86_ssse3_phadd_d_128;
4082 case Intrinsic::x86_ssse3_phsub_w:
4083 shadowIntrinsicID = Intrinsic::x86_ssse3_phadd_w;
4086 case Intrinsic::x86_ssse3_phsub_w_128:
4087 shadowIntrinsicID = Intrinsic::x86_ssse3_phadd_w_128;
4090 case Intrinsic::x86_ssse3_phsub_sw:
4091 shadowIntrinsicID = Intrinsic::x86_ssse3_phadd_sw;
4094 case Intrinsic::x86_ssse3_phsub_sw_128:
4095 shadowIntrinsicID = Intrinsic::x86_ssse3_phadd_sw_128;
4098 case Intrinsic::x86_avx_hsub_pd_256:
4099 shadowIntrinsicID = Intrinsic::x86_avx_hadd_pd_256;
4102 case Intrinsic::x86_avx_hsub_ps_256:
4103 shadowIntrinsicID = Intrinsic::x86_avx_hadd_ps_256;
4106 case Intrinsic::x86_avx2_phsub_d:
4107 shadowIntrinsicID = Intrinsic::x86_avx2_phadd_d;
4110 case Intrinsic::x86_avx2_phsub_w:
4111 shadowIntrinsicID = Intrinsic::x86_avx2_phadd_w;
4114 case Intrinsic::x86_avx2_phsub_sw:
4115 shadowIntrinsicID = Intrinsic::x86_avx2_phadd_sw;
4122 return handleIntrinsicByApplyingToShadow(
I, shadowIntrinsicID,
4140 void handleNEONVectorStoreIntrinsic(
IntrinsicInst &
I,
bool useLane) {
4144 int numArgOperands =
I.arg_size();
4147 assert(numArgOperands >= 1);
4148 Value *
Addr =
I.getArgOperand(numArgOperands - 1);
4150 int skipTrailingOperands = 1;
4153 insertShadowCheck(
Addr, &
I);
4157 skipTrailingOperands++;
4158 assert(numArgOperands >=
static_cast<int>(skipTrailingOperands));
4160 I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
4165 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
4166 assert(isa<FixedVectorType>(
I.getArgOperand(i)->getType()));
4167 Value *Shadow = getShadow(&
I, i);
4168 ShadowArgs.
append(1, Shadow);
4183 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getElementType(),
4184 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements() *
4185 (numArgOperands - skipTrailingOperands));
4186 Type *OutputShadowTy = getShadowTy(OutputVectorTy);
4190 I.getArgOperand(numArgOperands - skipTrailingOperands));
4192 Value *OutputShadowPtr, *OutputOriginPtr;
4194 std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
4195 Addr, IRB, OutputShadowTy,
Align(1),
true);
4196 ShadowArgs.
append(1, OutputShadowPtr);
4202 if (MS.TrackOrigins) {
4210 OriginCombiner
OC(
this, IRB);
4211 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++)
4212 OC.Add(
I.getArgOperand(i));
4215 OC.DoneAndStoreOrigin(
DL.getTypeStoreSize(OutputVectorTy),
4244 unsigned int trailingVerbatimArgs) {
4247 assert(trailingVerbatimArgs <
I.arg_size());
4251 for (
unsigned int i = 0; i <
I.arg_size() - trailingVerbatimArgs; i++) {
4252 Value *Shadow = getShadow(&
I, i);
4260 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
4262 Value *Arg =
I.getArgOperand(i);
4268 Value *CombinedShadow = CI;
4271 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
4274 CreateShadowCast(IRB, getShadow(&
I, i), CombinedShadow->
getType());
4275 CombinedShadow = IRB.
CreateOr(Shadow, CombinedShadow,
"_msprop");
4280 setOriginForNaryOp(
I);
4289 switch (
I.getIntrinsicID()) {
4290 case Intrinsic::uadd_with_overflow:
4291 case Intrinsic::sadd_with_overflow:
4292 case Intrinsic::usub_with_overflow:
4293 case Intrinsic::ssub_with_overflow:
4294 case Intrinsic::umul_with_overflow:
4295 case Intrinsic::smul_with_overflow:
4296 handleArithmeticWithOverflow(
I);
4298 case Intrinsic::abs:
4299 handleAbsIntrinsic(
I);
4301 case Intrinsic::is_fpclass:
4304 case Intrinsic::lifetime_start:
4305 handleLifetimeStart(
I);
4307 case Intrinsic::launder_invariant_group:
4308 case Intrinsic::strip_invariant_group:
4309 handleInvariantGroup(
I);
4311 case Intrinsic::bswap:
4314 case Intrinsic::ctlz:
4315 case Intrinsic::cttz:
4316 handleCountZeroes(
I);
4318 case Intrinsic::masked_compressstore:
4319 handleMaskedCompressStore(
I);
4321 case Intrinsic::masked_expandload:
4322 handleMaskedExpandLoad(
I);
4324 case Intrinsic::masked_gather:
4325 handleMaskedGather(
I);
4327 case Intrinsic::masked_scatter:
4328 handleMaskedScatter(
I);
4330 case Intrinsic::masked_store:
4331 handleMaskedStore(
I);
4333 case Intrinsic::masked_load:
4334 handleMaskedLoad(
I);
4336 case Intrinsic::vector_reduce_and:
4337 handleVectorReduceAndIntrinsic(
I);
4339 case Intrinsic::vector_reduce_or:
4340 handleVectorReduceOrIntrinsic(
I);
4342 case Intrinsic::vector_reduce_add:
4343 case Intrinsic::vector_reduce_xor:
4344 case Intrinsic::vector_reduce_mul:
4345 handleVectorReduceIntrinsic(
I);
4347 case Intrinsic::x86_sse_stmxcsr:
4350 case Intrinsic::x86_sse_ldmxcsr:
4353 case Intrinsic::x86_avx512_vcvtsd2usi64:
4354 case Intrinsic::x86_avx512_vcvtsd2usi32:
4355 case Intrinsic::x86_avx512_vcvtss2usi64:
4356 case Intrinsic::x86_avx512_vcvtss2usi32:
4357 case Intrinsic::x86_avx512_cvttss2usi64:
4358 case Intrinsic::x86_avx512_cvttss2usi:
4359 case Intrinsic::x86_avx512_cvttsd2usi64:
4360 case Intrinsic::x86_avx512_cvttsd2usi:
4361 case Intrinsic::x86_avx512_cvtusi2ss:
4362 case Intrinsic::x86_avx512_cvtusi642sd:
4363 case Intrinsic::x86_avx512_cvtusi642ss:
4364 handleVectorConvertIntrinsic(
I, 1,
true);
4366 case Intrinsic::x86_sse2_cvtsd2si64:
4367 case Intrinsic::x86_sse2_cvtsd2si:
4368 case Intrinsic::x86_sse2_cvtsd2ss:
4369 case Intrinsic::x86_sse2_cvttsd2si64:
4370 case Intrinsic::x86_sse2_cvttsd2si:
4371 case Intrinsic::x86_sse_cvtss2si64:
4372 case Intrinsic::x86_sse_cvtss2si:
4373 case Intrinsic::x86_sse_cvttss2si64:
4374 case Intrinsic::x86_sse_cvttss2si:
4375 handleVectorConvertIntrinsic(
I, 1);
4377 case Intrinsic::x86_sse_cvtps2pi:
4378 case Intrinsic::x86_sse_cvttps2pi:
4379 handleVectorConvertIntrinsic(
I, 2);
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;
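
    // Same propagation rule, but for per-element ("variable") shift amounts,
    // which is what the second argument of handleVectorShiftIntrinsic
    // distinguishes.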
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /*Variable=*/true);
      break;
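
    // Saturating packs. Roughly, the handler reduces each input shadow to a
    // per-element "is poisoned" flag (sext(Si != 0)) and feeds those flags
    // through the same pack operation, so a poisoned wide element poisons
    // the corresponding narrow result element.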
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
      handleVectorPackIntrinsic(I);
      break;
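
    // BLENDV picks each lane based on the sign bit of the mask operand, so
    // it is handled like a select rather than a plain bitwise operation.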
    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;

    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;
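
    // Sum-of-absolute-differences and multiply-add intrinsics combine groups
    // of input elements into one output element; the handlers conservatively
    // treat an output element as poisoned if any bit of its input group is
    // poisoned.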
    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;

    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I);
      break;

    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 8);
      break;

    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 16);
      break;
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;

    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;
    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;

    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;

    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;
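
    // (V)TEST/PTEST intrinsics collapse two whole vectors into a single flag,
    // so (approximately) a poisoned bit anywhere in either operand may poison
    // the scalar result.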
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256:
    case Intrinsic::x86_avx2_phsub_d:
    case Intrinsic::x86_avx2_phsub_w:
    case Intrinsic::x86_avx2_phsub_sw: {
      handleAVXHorizontalAddSubIntrinsic(I);
      break;
    }
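
    // Masked stores and loads move data only in lanes enabled by the mask;
    // the handlers mirror that for the shadow and additionally check the
    // mask itself, since an uninitialized mask bit decides whether memory is
    // accessed at all.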
    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }

    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // These AVX512 variants take the rounding mode as an extra trailing
      // argument, hence the trailing-args count of 1.
      [[maybe_unused]] bool Success = maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;

    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;

    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, false);
      break;
    }

    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, true);
      break;
    }
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      // Table lookups: apply the same intrinsic to the shadows of the table
      // registers, passing the (real) index vector through verbatim.
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }
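
  // Calls into libatomic (e.g. __atomic_load) cannot be instrumented like
  // plain loads and stores, so the shadow is copied manually around the
  // call, with the ordering strengthened so the shadow operations are not
  // reordered past the atomic operation itself.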
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have a terminator.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure
    // the shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }
  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure
    // the shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    // Shadow memory cannot be updated atomically together with the data, so
    // the destination shadow is conservatively painted clean before the
    // store (best effort).
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }

  void visitCallBase(CallBase &CB) {
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean. Side effects that are not visible in the
      // constraints are not handled.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic functions need special handling: there isn't enough
      // information about their semantics to instrument them generically.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // The callee will no longer be read-only once it is instrumented; drop
      // the memory attributes up front so nothing is optimized away based on
      // stale assumptions.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction())
        Func->removeFnAttrs(B);
    }
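
    // Argument shadow is handed to the callee through thread-local arrays
    // (__msan_param_tls and, with origin tracking, __msan_param_origin_tls)
    // that the instrumented callee reads back in its prologue. With
    // -msan-eager-checks, noundef arguments are instead checked right here
    // at the call site and get no TLS slot.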
    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so never check them eagerly.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertShadowCheck(A, &CB);
        continue;
      }

      unsigned Size = 0;
      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertShadowCheck(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        Value *Store = nullptr;
        // Compute the shadow for the argument even if it is ByVal, because
        // in that case getShadow() will copy the actual arg shadow to
        // __msan_param_tls.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a single
          // load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment, OriginSize);
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of uninit
          // data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
        LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
      }
      assert(Size != 0);
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }
    LLVM_DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!CB.getType()->isSized())
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter), kShadowTLSAlignment,
        "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }
  static bool isAMustTailRetVal(Value *RetVal) {
    // A musttail call may have a bitcast between it and the return.
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertShadowCheck(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
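
  // The shadow of a PHI is itself a PHI: only a placeholder node is created
  // here; its incoming shadow values are filled in after all blocks have
  // been visited (see ShadowPHINodes).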
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn,
                       {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }

  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // Poisoning is deferred: either to the matching llvm.lifetime.start, or
    // to the end if no lifetime intrinsic covers this alloca.
    AllocaSet.insert(&I);
  }
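
  // Select propagation rule: the result is poisoned if the chosen operand is
  // poisoned, or if the condition is poisoned - except that when the
  // condition is poisoned, bits that are equal and initialized in both
  // operands may still be reported as initialized.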
  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if the condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra select: Sa = select Sb, poisoned, (select b, Sc, Sd).
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      // Result shadow if the condition shadow is 1.
      Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Landing pads are left alone; see
    // https://github.com/google/sanitizers/issues/504.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }
  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledOperand()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  // ...

  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // Check the operand value itself.
    insertShadowCheck(Operand, &I);
    if (!isOutput)
      return;
    // For pointer outputs with a known pointee type, also unpoison the memory
    // the asm statement may have written.
    if (!Operand->getType()->isPointerTy() || !ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy does not encode the pointer's alignment; conservatively assume
      // the shadow memory is unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateMemSet(ShadowPtr, ConstantInt::getNullValue(IRB.getInt8Ty()),
                       SizeVal, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }

  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments.
    CallBase *CB = cast<CallBase>(&I);
    const DataLayout &DL = F.getDataLayout();
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Doing so before unpoisoning output arguments, so
    // that we won't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments before the actual asm call.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
                            /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: check each operand, then set the result shadow and
    // origin to clean.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertShadowCheck(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
struct VarArgHelperBase : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = getShadowAddrForVAArgument(IRB, ArgOffset);
    return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    // Make sure we don't overflow __msan_va_arg_tls.
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // The tail of __msan_va_arg_tls is not large enough to fit full value
    // shadow, but it will be copied to the backup anyway. Make it clean.
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    unpoisonVAListTagForInst(I);
  }
};
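
// System V AMD64: a va_list can draw from three areas, mirrored back-to-back
// in the shadow TLS: 48 bytes of general-purpose register save area (rdi,
// rsi, rdx, rcx, r8, r9 at 8 bytes each), then 128 bytes for 8 SSE registers
// at 16 bytes each (ending at offset 176), then the stack ("overflow") area.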
/// AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelperBase {
  static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of the X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area. Fixed arguments
        // passed through the overflow area will be stepped over by va_start,
        // so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
            A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory:
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
        }
        // Take fixed arguments into account for GpOffset and FpOffset, but
        // don't actually store shadows for them.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          MS.PtrTy);
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                         VAArgOverflowSize);
      }
    }
  }
};
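
// AAPCS64: 64 bytes of shadow cover x0-x7 (8 x 8 bytes) and 128 bytes cover
// q0-q7 (8 x 16 bytes), followed by the stack area. In the AArch64 va_list,
// gr_offs/vr_offs are negative offsets from gr_top/vr_top, which is why the
// register save area pointers below are formed by adding them to the *_top
// fields.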
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of aarch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    return {AK_Memory, 0};
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory:
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
      }
      // Count Gp/Vr fixed arguments to their respective offsets, but don't
      // bother to actually store a shadow.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        MS.PtrTy);
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 32 bit size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        MS.PtrTy);
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // Copy shadow for the remaining (unnamed) part of the GR save area.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Likewise for the VR save area.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally for the remaining (stack) arguments.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
/// PowerPC-specific implementation of VarArgHelper.
struct VarArgPowerPCHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPCHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // The parameter save area starts at 48 bytes from the frame pointer for
    // ABI V1, and at 32 bytes for ABI V2.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    if (TargetTriple.isPPC64()) {
      if (TargetTriple.isPPC64ELFv2ABI())
        VAArgBase = 32;
      else
        VAArgBase = 48;
    } else {
      // The parameter save area is 8 bytes from the frame pointer on PPC32.
      VAArgBase = 8;
    }
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the size of their element.
          Type *ElementTy = A->getType()->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for arguments with size < 8 to match the
          // placement of bits in a big endian system.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    Triple TargetTriple(F.getParent()->getTargetTriple());
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);

      // In PPC32 va_list_tag is a struct, whereas in PPC64 it's a pointer.
      if (!TargetTriple.isPPC64()) {
        RegSaveAreaPtrPtr =
            IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      }
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);

      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
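
// s390x ELF ABI: the 160-byte register save area keeps r2-r6 at offsets
// 16-56 and f0/f2/f4/f6 at offsets 128-160; the constants below encode that
// layout, plus the __va_list fields holding the overflow area pointer
// (offset 16) and the register save area pointer (offset 24).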
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  ArgKind classifyArgument(Type *T) {
    // T is a SystemZABIInfo::classifyArgumentType() output, and there are
    // only a few possibilities of what it can be. In particular, enums, single
    // element structs and large types have already been taken care of.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // ABI says: "One of the simple integer types no more than 64 bits wide.
    // ... If such an argument is shorter than 64 bits, replace it by a full
    // 64-bit integer representing the same number, using sign or zero
    // extension". Shadow for an integer argument has the same type as the
    // argument itself, so it can be sign or zero extended as well.
    bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
    bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
    if (ZExt) {
      assert(!SExt);
      return ShadowExtension::Zero;
    }
    if (SExt) {
      assert(!ZExt);
      return ShadowExtension::Sign;
    }
    return ShadowExtension::None;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex. No need to store shadow, since fixed vector
        // arguments are not accessed through va_list.
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset, but store shadow only for varargs.
        uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
        uint64_t ArgSize = alignTo(ArgAllocSize, 8);
        if (OverflowOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
          }
          OverflowOffset += ArgSize;
        } else {
          OverflowOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions, it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjusting the shadow for arguments with size < IntptrSize to
          // match the placement of bits in a big endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, IntptrSize);
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
/// Implementation of VarArgHelper used for ARM32, MIPS, RISCV and
/// LoongArch64, which share a simple stack-only va_arg convention.
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjusting the shadow for arguments with size < IntptrSize to match
        // the placement of bits in a big endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_list shadow from the backup copy of the
    // TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
// ARM32, RISCV, MIPS and LoongArch64 share the same calling conventions
// regarding varargs.
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;

/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
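
// The va_arg helper is picked from the module's target triple; targets
// without a dedicated implementation fall back to VarArgNoOpHelper, which
// simply leaves va_arg shadow unpropagated (possible false positives).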
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);

  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);

  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);

  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);

  // On PowerPC32 the va_list tag is a struct: {char, char, i16, char*, char*}.
  if (TargetTriple.isPPC32())
    return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/12);

  if (TargetTriple.isPPC64())
    return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);

  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (!CompileKernel && F.getName() == kMsanModuleCtorName)
    return false;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out the function's memory attributes: the instrumented function
  // reads and writes shadow memory, so it is neither readonly nor
  // speculatable anymore.
  AttributeMask B;
  B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
  F.removeFnAttrs(B);

  return Visitor.runOnFunction();
}
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isStore(int Opcode)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static const size_t kNumberOfAccessSizes
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
static bool isAMustTailRetVal(Value *RetVal)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Module.h This file contains the declarations for the Module class.
static const MemoryMapParams Linux_LoongArch64_MemoryMapParams
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams
static const Align kMinOriginAlignment
static const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< uint64_t > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
static cl::opt< uint64_t > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
static const unsigned kOriginSize
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
static const Align kShadowTLSAlignment
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
static cl::opt< bool > ClDumpStrictIntrinsics("msan-dump-strict-intrinsics", cl::desc("Prints 'unknown' intrinsics that were handled heuristically. " "Use -msan-dump-strict-instructions to print intrinsics that " "could not be handled exactly nor heuristically."), cl::Hidden, cl::init(false))
static const MemoryMapParams Linux_S390X_MemoryMapParams
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_I386_MemoryMapParams
const char kMsanInitName[]
static cl::opt< bool > ClPrintStackNames("msan-print-stack-names", cl::desc("Print name of local stack variable"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< uint64_t > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleLifetimeIntrinsics("msan-handle-lifetime-intrinsics", cl::desc("when possible, poison scoped variables at the beginning of the scope " "(slower, but more precise)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
static GlobalVariable * createPrivateConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClEagerChecks("msan-eager-checks", cl::desc("check arguments and return values at function call boundaries"), cl::Hidden, cl::init(false))
static cl::opt< int > ClDisambiguateWarning("msan-disambiguate-warning-threshold", cl::desc("Define threshold for number of checks per " "debug location to force origin update."), cl::Hidden, cl::init(3))
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
static cl::opt< uint64_t > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams
static const unsigned kParamTLSSize
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
static const unsigned kRetvalTLSSize
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams
const char kMsanModuleCtorName[]
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDisableChecks("msan-disable-checks", cl::desc("Apply no_sanitize to the whole file"), cl::Hidden, cl::init(false))
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
an instruction to allocate memory on the stack
void setAlignment(Align Align)
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
This class represents a no-op cast from one type to another.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_SGT
signed greater than
@ ICMP_SGE
signed greater or equal
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static bool shouldExecute(unsigned CounterName)
This instruction compares its operands according to the predicate given to the constructor.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setComdat(Comdat *C)
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ ExternalLinkage
Externally visible function.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
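Taken together, the builder methods above implement the usual shadow-propagation idiom. A minimal sketch for a binary operator, assuming I is the instrumented llvm::Instruction and getShadow() is a hypothetical per-value shadow lookup (both names are assumptions):

IRBuilder<> IRB(&I);                    // insert new instructions before I
Value *SA = getShadow(I.getOperand(0)); // hypothetical helper
Value *SB = getShadow(I.getOperand(1));
Value *S  = IRB.CreateOr(SA, SB, "_msprop");   // result shadow: OR of inputs
Value *Poisoned = IRB.CreateICmpNE(
    S, Constant::getNullValue(S->getType()));  // i1 "any bit is poisoned"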
std::vector< ConstraintInfo > ConstraintInfoVector
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field or array element value into an aggregate value.
Base class for instruction visitors.
void visit(Iterator Start, Iterator End)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This class represents a cast from an integer to a pointer.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
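A one-line sketch, assuming Ctx is an LLVMContext; this is how arbitrary-width integer types (i1, i8, i65, ...) come into being:

IntegerType *I128 = IntegerType::get(Ctx, /*NumBits=*/128);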
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handling.
An instruction for reading from memory.
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
This class wraps the llvm.memcpy intrinsic.
This class wraps the llvm.memmove intrinsic.
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior when provided with illegal operands, and return a poison value instead.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
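A hedged sketch of the conventional contract in a pass's run method; instrument() is a stand-in for the pass's real work, not a real API:

PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
  bool Modified = instrument(M); // stand-in: returns true if IR changed
  return Modified ? PreservedAnalyses::none() : PreservedAnalyses::all();
}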
This class represents a cast from a pointer to an integer.
Resume the propagation of an exception.
Return a value (possibly void) from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
bool remove(const value_type &X)
Remove an item from the set vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
A SetVector that performs no allocations if smaller than a certain size.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
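A minimal sketch of the lookup, assuming TLI is a TargetLibraryInfo and F a Function*:

LibFunc LF;
if (TLI.getLibFunc(F->getName(), LF) && TLI.has(LF)) {
  // LF now identifies the library routine, e.g. LibFunc_memcpy.
}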
Triple - Helper class for working with autoconf configuration names.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
bool isRISCV32() const
Tests whether the target is 32-bit RISC-V.
bool isPPC32() const
Tests whether the target is 32-bit PowerPC (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isPPC64() const
Tests whether the target is 64-bit PowerPC (little and big endian).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
bool isSystemZ() const
Tests whether the target is SystemZ.
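These predicates are how per-target decisions are typically dispatched, e.g. when selecting memory-map parameters. A sketch, assuming M is the current Module:

Triple TT(M.getTargetTriple());
bool IsAArch64 = TT.isAArch64(); // each predicate guards a target path
bool IsMips64  = TT.isMIPS64();
bool IsSysZ    = TT.isSystemZ();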
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
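A sketch composing the queries above to size a same-width integer shadow; V and Ctx are assumed to be an llvm::Value* and an LLVMContext:

Type *OrigTy = V->getType();
Type *ScalarTy = OrigTy->getScalarType();   // element type for vectors
if (ScalarTy->isIntegerTy() && OrigTy->isSized()) {
  unsigned Bits = ScalarTy->getScalarSizeInBits();
  Type *ShadowTy = Type::getIntNTy(Ctx, Bits); // an iN shadow of equal width
  (void)ShadowTy;
}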
'undef' values are things that do not have specified contents.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This represents the llvm.va_copy intrinsic.
This represents the llvm.va_start intrinsic.
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
StringRef getName() const
Return a constant reference to the value's name.
Type * getElementType() const
This class represents zero extension of integer types.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
initializer< Ty > init(const Ty &Val)
Function * Kernel
Summary of a kernel (=entry point for target offloading).
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
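A worked check of that contract:

unsigned X = Log2_32_Ceil(5); // 3, since 2^2 < 5 <= 2^3
unsigned Y = Log2_32_Ceil(8); // 3, exact power of two
unsigned Z = Log2_32_Ceil(0); // 32, by the documented zero convention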
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
std::pair< Instruction *, Value * > SplitBlockAndInsertSimpleForLoop(Value *End, BasicBlock::iterator SplitBefore)
Insert a for (int i = 0; i < End; i++) loop structure (with the exception that End is assumed > 0, and the loop must execute at least once).
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
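A hedged sketch of the usual pairing with appendToGlobalCtors (listed further below); the ctor and init names mirror the MSan convention:

getOrCreateSanitizerCtorAndInitFunctions(
    M, "msan.module_ctor", "__msan_init",
    /*InitArgTypes=*/{}, /*InitArgs=*/{},
    [&](Function *Ctor, FunctionCallee) {
      appendToGlobalCtors(M, Ctor, /*Priority=*/0); // register the ctor
    });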
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Or
Bitwise or logical OR of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
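A worked example of the rounding:

uint64_t A = alignTo(13, Align(8)); // 16
uint64_t B = alignTo(16, Align(8)); // 16, already a multiple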
constexpr unsigned BitWidth
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
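This is the workhorse behind sanitizer checks: branch to a cold block when a condition holds and emit the report there. A sketch, assuming Cond is an i1 "shadow is poisoned" flag, IRB an IRBuilder positioned at the faulting instruction, and Ctx its LLVMContext:

Instruction *CheckTerm = SplitBlockAndInsertIfThen(
    Cond, IRB.GetInsertPoint(), /*Unreachable=*/false,
    MDBuilder(Ctx).createUnlikelyBranchWeights()); // bias toward "clean"
IRB.SetInsertPoint(CheckTerm); // now inside the cold "then" block
// ...emit the warning/report call here...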
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A CRTP mix-in to automatically provide informational APIs needed for passes.