#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"
219 "Controls which checks to insert");
222 "Controls which instruction to instrument");
240 "msan-track-origins",
245 cl::desc(
"keep going after reporting a UMR"),
254 "msan-poison-stack-with-call",
259 "msan-poison-stack-pattern",
260 cl::desc(
"poison uninitialized stack variables with the given pattern"),
265 cl::desc(
"Print name of local stack variable"),
270 cl::desc(
"Poison fully undef temporary values. "
271 "Partially undefined constant vectors "
272 "are unaffected by this flag (see "
273 "-msan-poison-undef-vectors)."),
277 "msan-poison-undef-vectors",
278 cl::desc(
"Precisely poison partially undefined constant vectors. "
279 "If false (legacy behavior), the entire vector is "
280 "considered fully initialized, which may lead to false "
281 "negatives. Fully undefined constant vectors are "
282 "unaffected by this flag (see -msan-poison-undef)."),
286 "msan-precise-disjoint-or",
287 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
288 "disjointedness is ignored (i.e., 1|1 is initialized)."),
293 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
298 cl::desc(
"exact handling of relational integer ICmp"),
302 "msan-handle-lifetime-intrinsics",
304 "when possible, poison scoped variables at the beginning of the scope "
305 "(slower, but more precise)"),
316 "msan-handle-asm-conservative",
327 "msan-check-access-address",
328 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
333 cl::desc(
"check arguments and return values at function call boundaries"),
337 "msan-dump-strict-instructions",
338 cl::desc(
"print out instructions with default strict semantics i.e.,"
339 "check that all the inputs are fully initialized, and mark "
340 "the output as fully initialized. These semantics are applied "
341 "to instructions that could not be handled explicitly nor "
350 "msan-dump-heuristic-instructions",
351 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
352 "Use -msan-dump-strict-instructions to print instructions that "
353 "could not be handled explicitly nor heuristically."),
357 "msan-instrumentation-with-call-threshold",
359 "If the function being instrumented requires more than "
360 "this number of checks and origin stores, use callbacks instead of "
361 "inline checks (-1 means never use callbacks)."),
366 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
376 cl::desc(
"Insert checks for constant shadow values"),
383 cl::desc(
"Place MSan constructors in comdat sections"),
389 cl::desc(
"Define custom MSan AndMask"),
393 cl::desc(
"Define custom MSan XorMask"),
397 cl::desc(
"Define custom MSan ShadowBase"),
401 cl::desc(
"Define custom MSan OriginBase"),
406 cl::desc(
"Define threshold for number of checks per "
407 "debug location to force origin update."),
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
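// A sketch of how these parameters are consumed (see getShadowPtrOffset() and
// getShadowOriginPtrUserspace() further down): for an application address App,
//   Shadow = ((App & ~AndMask) ^ XorMask) + ShadowBase
//   Origin = ((App & ~AndMask) ^ XorMask) + OriginBase   (4-byte aligned)
// On Linux/x86_64, for example, the mapping is a plain XOR (AndMask == 0,
// ShadowBase == 0); the exact constants vary by platform and LLVM version.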
class MemorySanitizer {
public:
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

private:
  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args);
  Value *ParamOriginTLS;

  Value *RetvalOriginTLS;

  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;

  FunctionCallee MaybeWarningVarSizeFn;

  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;

  FunctionCallee MsanPoisonStackFn;

  FunctionCallee MsanChainOriginFn;

  FunctionCallee MsanSetOriginFn;

  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;

  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;

  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;

  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;

  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;

  MemoryMapParams CustomMapParams;

  MDNode *ColdCallWeights;

  MDNode *OriginStoreWeights;
};
void insertModuleCtor(Module &M) {

  if (!Options.Kernel) {

  MemorySanitizer Msan(*F.getParent(), Options);

      OS, MapClassName2PassName);

  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;
template <typename... ArgsTy>
FunctionCallee
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                                 ArgsTy... Args) {
  if (TargetTriple.getArch() == Triple::systemz) {
    // SystemZ ABI: shadow/origin pair is returned via a hidden parameter.
    return M.getOrInsertFunction(Name, Type::getVoidTy(*C), PtrTy,
                                 std::forward<ArgsTy>(Args)...);
  }

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
}
void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
  IRBuilder<> IRB(*C);

  RetvalOriginTLS = nullptr;

  ParamOriginTLS = nullptr;

  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
  }

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);

  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
}
static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
  return M.getOrInsertGlobal(Name, Ty, [&] {
    return new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                              nullptr, Name, nullptr,
                              GlobalVariable::InitialExecTLSModel);
  });
}
void MemorySanitizer::createUserspaceApi(Module &M,
                                         const TargetLibraryInfo &TLI) {
  IRBuilder<> IRB(*C);

  if (TrackOrigins) {
    StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                      : "__msan_warning_with_origin_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName,
                                      IRB.getVoidTy(), IRB.getInt32Ty());
  } else {
    StringRef WarningFnName =
        Recover ? "__msan_warning" : "__msan_warning_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
  }

  VAArgOverflowSizeTLS =
      getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls",
                        IRB.getIntPtrTy(M.getDataLayout()));

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName,
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName,
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
        IRB.getInt32Ty());
  }

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
}
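// Illustrative sketch of what the callbacks above are for (not emitted
// verbatim by this pass): once a function exceeds
// -msan-instrumentation-with-call-threshold, an inline check such as
//   if (shadow != 0) __msan_warning_with_origin(origin);
// is replaced by a single call,
//   __msan_maybe_warning_4(shadow, origin);
// which trades a runtime call for much smaller instrumented code.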
void MemorySanitizer::initializeCallbacks(Module &M,
                                          const TargetLibraryInfo &TLI) {
  // Only do this once.
  if (CallbacksInitialized)
    return;

  IRBuilder<> IRB(*C);

  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
  MsanSetOriginFn = M.getOrInsertFunction(
      "__msan_set_origin",
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
  MemmoveFn =
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset",
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

  if (CompileKernel) {
    createKernelApi(M, TLI);
  } else {
    createUserspaceApi(M, TLI);
  }
  CallbacksInitialized = true;
}

FunctionCallee
MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore, int size) {
  FunctionCallee *Fns =
      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;

  if (ShadowPassed || OriginPassed) {

    MapParams = &CustomMapParams;
  } else {
    switch (TargetTriple.getOS()) {

      switch (TargetTriple.getArch()) {

      switch (TargetTriple.getArch()) {

      switch (TargetTriple.getArch()) {

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();

  if (!CompileKernel) {
    if (TrackOrigins)
      M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(
            M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
            IRB.getInt32(TrackOrigins), "__msan_track_origins");
      });

    if (Recover)
      M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                  GlobalValue::WeakODRLinkage,
                                  IRB.getInt32(Recover), "__msan_keep_going");
      });
  }
}
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;

  virtual void visitVAStartInst(VAStartInst &I) = 0;

  virtual void visitVACopyInst(VACopyInst &I) = 0;

  virtual void finalizeInstrumentation() = 0;
};

struct MemorySanitizerVisitor;

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor);

static unsigned TypeSizeToSizeIndex(TypeSize TS) {
  if (TS.isScalable())
    // Scalable types unconditionally take the slow path.
    return kNumberOfAccessSizes;
  unsigned TypeSizeFixed = TS.getFixedValue();
  if (TypeSizeFixed <= 8)
    return 0;
  return Log2_32_Ceil((TypeSizeFixed + 7) / 8);
}
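// For reference (an illustrative summary, assuming the four
// __msan_maybe_* callbacks created in createUserspaceApi()): shadow values of
// up to 8, 16, 32 and 64 bits map to size indices 0, 1, 2 and 3, selecting
// __msan_maybe_warning_{1,2,4,8} / __msan_maybe_store_origin_{1,2,4,8}.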
class NextNodeIRBuilder : public IRBuilder<> {

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  MemorySanitizer &MS;

  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;

  bool PropagateShadow;

  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;

    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  };

  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallSetVector<AllocaInst *, 16> AllocaSet;

  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI) {
    bool SanitizeFunction =
        F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);
    FnPrologueEnd =
        IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
            .CreateIntrinsic(Intrinsic::donothing, {});
    if (MS.CompileKernel) {
      IRBuilder<> IRB(FnPrologueEnd);
      insertKmsanPrologue(IRB);
    }

    LLVM_DEBUG(if (!InsertChecks) dbgs()
               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");
  }

  bool instrumentWithCalls(Value *V) {
    // Constants are likely to be eliminated by follow-up passes.
    if (isa<Constant>(V))
      return false;

    ++SplittableBlocksCount;
    return ClInstrumentationWithCallThreshold >= 0 &&
           SplittableBlocksCount > ClInstrumentationWithCallThreshold;
  }

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&
           (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
  }

  // Creates a new origin and records the stack trace. Origin chaining costs
  // runtime resources, so it is only done when TrackOrigins >= 2.
  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1)
      return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }
  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

  /// Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {

        CurrentAlignment = IntptrAlignment;
      }
    }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
    // ZExt cannot convert between vector and scalar.
    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);

      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
                  OriginAlignment);

    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (instrumentWithCalls(ConvertedShadow) &&
        SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
    } else {
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
      IRBuilder<> IRBNew(CheckTerm);
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
                  OriginAlignment);
    }
  }
  void materializeStores() {
    for (StoreInst *SI : StoreList) {
      IRBuilder<> IRB(SI);
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      Type *ShadowTy = Shadow->getType();
      const Align Alignment = SI->getAlign();
      const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      [[maybe_unused]] StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
                    OriginAlignment);
    }
  }
  bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
    if (MS.TrackOrigins < 2)
      return false;

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

  /// Helper function to insert a warning at IRB's current insert point.
  void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {

      auto NewDebugLoc = OI->getDebugLoc();

      IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
      Origin = updateOrigin(Origin, IRBOrigin);

    if (MS.CompileKernel || MS.TrackOrigins)
      IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
1443 if (MS.CompileKernel || MS.TrackOrigins)
1454 const DataLayout &
DL =
F.getDataLayout();
1455 TypeSize TypeSizeInBits =
DL.getTypeSizeInBits(ConvertedShadow->
getType());
1457 if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
1459 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1460 Value *ConvertedShadow2 =
1464 FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1468 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1472 FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
1475 unsigned ShadowSize =
DL.getTypeAllocSize(ConvertedShadow2->
getType());
1478 {ShadowAlloca, ConstantInt::get(IRB.
getInt64Ty(), ShadowSize),
1479 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1484 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1487 !MS.Recover, MS.ColdCallWeights);
1490 insertWarningFn(IRB, Origin);
  void materializeInstructionChecks(
      ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
    const DataLayout &DL = F.getDataLayout();
    // When origin tracking is on, each check carries its own origin and must
    // be materialized separately; otherwise checks can be OR-combined.
    bool Combine = !MS.TrackOrigins;
    Instruction *Instruction = InstructionChecks.front().OrigIns;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);
      IRBuilder<> IRB(Instruction);

      Value *ConvertedShadow = ShadowData.Shadow;

        insertWarningFn(IRB, ShadowData.Origin);

      if (!Combine) {
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        continue;
      }

      if (!Shadow) {
        Shadow = ConvertedShadow;
        continue;
      }

      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
    }

    if (Shadow) {
      IRBuilder<> IRB(Instruction);
      materializeOneCheck(IRB, Shadow, nullptr);
    }
  }
  void materializeChecks() {
#ifndef NDEBUG
    // For the assert below.
    SmallPtrSet<Instruction *, 16> Done;
#endif

    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      // Checks are grouped by the original instruction.
      assert(Done.insert(OrigIns).second);
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;
                            });
      // Process all checks of the instruction at once.
      materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
      I = J;
    }
  }
  void insertKmsanPrologue(IRBuilder<> &IRB) {
    Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
    Constant *Zero = IRB.getInt32(0);
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");

    MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
  }
    for (Instruction *I : Instructions)
      InstVisitor<MemorySanitizerVisitor>::visit(*I);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Poison llvm.lifetime.start-marked allocas at the start of their scope,
    // and everything else at function entry.
    for (auto Item : LifetimeStartList) {
      instrumentAlloca(*Item.second, Item.first);
      AllocaSet.remove(Item.second);
    }

    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    materializeChecks();

    // Delayed instrumentation of StoreInst.
    // This may not add new address checks.
    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

  Type *getShadowTy(Type *OrigTy) {
    const DataLayout &DL = F.getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getElementCount());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type *, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  Value *collapseStructShadow(StructType *Struct, Value *Shadow,
                              IRBuilder<> &IRB) {
    Value *FalseVal = IRB.getIntN(/* width */ 1, 0);
    Value *Aggregator = FalseVal;

    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
      Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
      Value *ShadowBool = convertToBool(ShadowItem, IRB);

      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
      else
        Aggregator = ShadowBool;
    }

    return Aggregator;
  }
  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
                             IRBuilder<> &IRB) {
    if (!Array->getNumElements())
      return IRB.getIntN(/* width */ 1, 0);

    Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);

    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
      Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
    }
    return Aggregator;
  }

  /// Convert a shadow value to its flattened variant. The resulting shadow may
  /// not have the same bit width as the input, but is always comparable to 0.
  Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
    if (StructType *Struct = dyn_cast<StructType>(V->getType()))
      return collapseStructShadow(Struct, V, IRB);
    if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
      return collapseArrayShadow(Array, V, IRB);
    if (isa<VectorType>(V->getType())) {
      unsigned BitWidth =
          V->getType()->getPrimitiveSizeInBits().getFixedValue();
      return IRB.CreateBitCast(V, IntegerType::get(*MS.C, BitWidth));
    }
    return V;
  }

  /// Convert a scalar value to an i1 by comparing with 0.
  Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
    Type *VTy = V->getType();
    if (!VTy->isIntegerTy())
      return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());
    }
    return MS.IntptrTy;
  }

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    }
    assert(IntPtrTy == MS.IntptrTy);
    return MS.PtrTy;
  }

  Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
    if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
      return ConstantVector::getSplat(
          VectTy->getElementCount(),
          constToIntPtr(VectTy->getElementType(), C));
    }
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C);
  }
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);

    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));

    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
    return OffsetLong;
  }
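  // Worked example (a sketch, assuming the Linux/x86_64 parameters mentioned
  // earlier: AndMask == 0, XorMask == 0x500000000000): application address
  // 0x7fff80001234 maps to shadow offset
  // 0x7fff80001234 ^ 0x500000000000 == 0x2fff80001234; with ShadowBase == 0
  // that offset is the shadow address itself.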
  std::pair<Value *, Value *>
  getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
                              MaybeAlign Alignment) {
    if (VectorType *VectTy = dyn_cast<VectorType>(Addr->getType())) {
      assert(VectTy->getElementType()->isPointerTy());
    }
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
      ShadowLong =
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
    }
    Value *ShadowPtr = IRB.CreateIntToPtr(
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
        OriginLong =
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
      if (!Alignment || *Alignment < kMinOriginAlignment) {
        uint64_t Mask = kMinOriginAlignment.value() - 1;
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
      }
      OriginPtr = IRB.CreateIntToPtr(
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    }
    return std::make_pair(ShadowPtr, OriginPtr);
  }

  template <typename... ArgsTy>
  Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
                            ArgsTy... Args) {
    if (MS.TargetTriple.getArch() == Triple::systemz) {
      IRB.CreateCall(Callee,
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
    }

    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  }
  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
                                                            IRBuilder<> &IRB,
                                                            Type *ShadowTy,
                                                            bool isStore) {
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);

    FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
    Value *AddrCast = IRB.CreatePointerCast(Addr, MS.PtrTy);
    if (Getter) {
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
    } else {
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          IRB,
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
          AddrCast, SizeVal);
    }
    Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
    Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
    return std::make_pair(ShadowPtr, OriginPtr);
  }

  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
                                                       IRBuilder<> &IRB,
                                                       Type *ShadowTy,
                                                       bool isStore) {
    VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
    if (!VectTy)
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
    Value *ShadowPtrs = ConstantInt::getNullValue(
        FixedVectorType::get(MS.PtrTy, NumElements));
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
          FixedVectorType::get(MS.PtrTy, NumElements));
    for (unsigned i = 0; i < NumElements; ++i) {
      Value *OneAddr =
          IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);

      ShadowPtrs = IRB.CreateInsertElement(
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
        OriginPtrs = IRB.CreateInsertElement(
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    }
    return {ShadowPtrs, OriginPtrs};
  }
  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 Type *ShadowTy,
                                                 MaybeAlign Alignment,
                                                 bool isStore) {
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
  }
  Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
    if (!MS.TrackOrigins)
      return nullptr;

  Value *getOriginPtrForRetval() {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins)
      return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  Constant *getCleanShadow(Type *OrigTy) {
    Type *ShadowTy = getShadowTy(OrigTy);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// Create a clean shadow value for a given value.
  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

  /// Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    return Constant::getAllOnesValue(ShadowTy);
  }

  /// Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }
  /// Get the shadow value for a given Value.
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanShadow(V);
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (isa<UndefValue>(V)) {
      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value *&ShadowPtr = ShadowMap[V];
      if (ShadowPtr)
        return ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(FnPrologueEnd);
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
          LLVM_DEBUG(dbgs() << (FArg.getType()->isScalableTy()
                                    ? "vscale not fully supported\n"
                                    : "Arg is not sized\n"));
          if (A == &FArg) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            break;
          }
          continue;
        }

        unsigned Size = FArg.hasByValAttr()
                            ? DL.getTypeAllocSize(FArg.getParamByValType())
                            : DL.getTypeAllocSize(FArg.getType());

        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          if (FArg.hasByValAttr()) {
            // The ByVal pointer itself has a clean shadow; the argument's
            // shadow is copied into the pointed-to memory.
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
                                   /*isStore*/ true);
            if (!PropagateShadow || Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
                  Size, ArgAlign);
            } else {
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
              [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                  CpShadowPtr, CopyAlign, Base, CopyAlign, Size);

              if (MS.TrackOrigins) {
                Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
                EntryIRB.CreateMemCpy(
                    CpOriginPtr, kMinOriginAlignment, OriginPtr,
                    kMinOriginAlignment, Size);
              }
            }
          }

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
          } else {
            // Shadow over TLS.
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
                                                   kShadowTLSAlignment);
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
            }
          }
          LLVM_DEBUG(dbgs()
                     << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");
          break;
        }

        ArgOffset += alignTo(Size, kShadowTLSAlignment);
      }
      assert(ShadowPtr && "Could not find shadow for an argument");
      return ShadowPtr;
    }
2127 cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
2128 PoisonUndefVectors) {
2131 for (
unsigned i = 0; i != NumElems; ++i) {
2134 : getCleanShadow(Elem);
2138 LLVM_DEBUG(
dbgs() <<
"Partial undef constant vector: " << *V <<
" ==> "
2139 << *ShadowConstant <<
"\n");
2141 return ShadowConstant;
2147 return getCleanShadow(V);
  /// Get the shadow for the i-th argument of instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins)
      return nullptr;
    if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
      return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    }
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// Get the origin for the i-th argument of instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the shadow value is not 0.
  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks)
      return;

      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");

    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
            isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
           "Can only insert checks for integer, vector, and aggregate shadow "
           "types");

    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// Remember the place where a check for Val's shadow should be inserted.
  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow)
        return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow)
        return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertCheckShadow(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    return ConstantDataVector::get(IRB.getContext(), OrderingTable);
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    return ConstantDataVector::get(IRB.getContext(), OrderingTable);
  }
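  // Rationale (a summary of the atomics handling, not extra behavior): an
  // atomic store must be at least Release so the shadow store is published
  // together with the data, and an atomic load must be at least Acquire so
  // the observed shadow is no staler than the observed data. The tables above
  // apply the same strengthening to the C ABI orderings of libcall-style
  // atomics.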
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))
      return;
    // Don't want to visit if we're in the prologue.
    if (isInPrologue(I))
      return;

      // Skipped instructions still need clean shadow and origin values.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
  /// Instrument LoadInst.
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheckShadowOf(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
        setOrigin(&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr,
                                            OriginAlignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// Instrument StoreInst.
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
    if (ClCheckAccessAddress)
      insertCheckShadowOf(I.getPointerOperand(), &I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
                                          /*isStore*/ true)
                           .first;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // Only test the conditional argument of a cmpxchg instruction. The other
    // argument can potentially be uninitialized, but we can not detect this
    // situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertCheckShadowOf(Val, &I);

    IRB.CreateStore(getCleanShadow(Val), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
                                          "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1, I.getShuffleMask(),
                                          "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    // Don't instrument the bitcast between a musttail call and a ret: new
    // instructions are not allowed after a musttail call.
    if (CallInst *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
  /// Propagate shadow for bitwise AND.
  ///
  /// This code is exact: "and" of 0 and a poisoned value results in an
  /// unpoisoned value:
  ///   1&1 => 1;  0&1 => 0;  p&1 => p;
  ///   1&0 => 0;  0&0 => 0;  p&0 => 0;
  ///   1&p => p;  0&p => 0;  p&p => p;
  ///   S = (S1 & S2) | (V1 & S2) | (S1 & V2)
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);

    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);

    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);
  }

  /// Propagate shadow for bitwise OR. Dually, "or" of 1 and a poisoned value
  /// results in an unpoisoned value:
  ///   1|1 => 1;  0|1 => 1;  p|1 => 1;
  ///   1|0 => 1;  0|0 => 0;  p|0 => p;
  ///   1|p => 1;  0|p => p;  p|p => p;
  ///   S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);

    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(IRB.CreateNot(V1), S2);
    Value *S1V2 = IRB.CreateAnd(S1, IRB.CreateNot(V2));

    Value *S = IRB.CreateOr({S1S2, V1S2, S1V2});

      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");

    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
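  // Concrete example of the OR rule above: with V1 = 0b1100 fully defined
  // (S1 = 0) and V2 unknown in its low two bits (S2 = 0b0011), the result
  // shadow is (0 & S2) | (~V1 & S2) | (0 & ~V2) = 0b0011 & ~0b1100 = 0b0011:
  // only bits where V1 is 0 stay undefined, because 1|x == 1 regardless of x.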
  /// Combine shadows (by OR) and origins (by select) of several operands.
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

    /// Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        if (!Shadow) {
          Shadow = OpShadow;
        } else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *Cond = MSV->convertToBool(OpShadow, IRB);
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// Set the current combined values as the given instruction's shadow
    /// and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        MSV->setOrigin(I, Origin);
      }
    }

    /// Store the current combined origin to the specified origin location.
    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {
        MSV->paintOrigin(IRB, Origin, OriginPtr, TS, kMinOriginAlignment);
      }
    }
  };

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;
  /// Propagate origin for an arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
      return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())
      OC.Add(Op.get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
                                  Ty->getScalarSizeInBits()
                            : Ty->getPrimitiveSizeInBits();
  }

  /// Cast between two shadow types, extending or truncating as necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (srcTy == dstTy)
      return V;
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)
      return IRB.CreateICmpNE(V, getCleanShadow(V));

  /// Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// Propagate shadow for an arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
      SC.Add(Op.get());
    SC.Done(&I);
  }
  /// Horizontally OR groups of adjacent shadow elements together.
  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
                          Value *FirstArgShadow, Value *SecondArgShadow) {
    unsigned TotalNumElems =
        cast<FixedVectorType>(FirstArgShadow->getType())->getNumElements();
    if (SecondArgShadow)
      TotalNumElems = TotalNumElems * 2;

    assert(TotalNumElems % ReductionFactor == 0);

    IRBuilder<> IRB(&I);
    Value *OrShadow = nullptr;
    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;
      for (unsigned X = 0; X < TotalNumElems; X += ReductionFactor)
        Mask.push_back(X + i);
  /// Instrument horizontal pairwise intrinsics (e.g., hadd/hsub): each pair
  /// of adjacent input elements combines into one output element, so the
  /// output shadow is the pairwise OR of the input shadows.
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());

    IRBuilder<> IRB(&I);

    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, 2, FirstArgShadow, SecondArgShadow);

    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }

  /// Same as above, but for intrinsics whose shadow must be reinterpreted at
  /// ReinterpretElemWidth bits per element first.
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =
        cast<FixedVectorType>(I.getArgOperand(0)->getType());
    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());

    FixedVectorType *ReinterpretShadowTy = nullptr;

    IRBuilder<> IRB(&I);

    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);

    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
    }

    Value *OrShadow = horizontalReduce(I, 2, FirstArgShadow, SecondArgShadow);

    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  }
  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }

  // Handle multiplication by a constant that may have zeros in its lower
  // bits: those make the corresponding low bits of the result defined zeros.
  // Model (X * (A * 2**B)) as ((X << B) * A) and instrument (X << B) as
  // (Sx << B); multiplication by 2**B (instead of a shift) also covers
  // zero elements in vector constants.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
      Type *EltTy = VTy->getElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
                dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
        } else {
          Elements.push_back(ConstantInt::get(EltTy, 1));
        }
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
      } else {
        ShadowMul = ConstantInt::get(Ty, 1);
      }
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }
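  // Example: multiplying an uninitialized x by 24 (= 3 * 2**3) always yields
  // three defined zero low bits, so the result shadow is Sx * 8, i.e., Sx
  // shifted left by countr_zero(24) == 3; the upper shadow bits stay poisoned.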
  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }

  void handleIntegerDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }

  // Floating point division is side-effect free, so we cannot require that
  // the divisor is fully initialized; propagate shadow instead.
  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
  /// Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers; for integers this is a
    // no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0;  A != B  <==>  (C = A^B) != 0.
    // The result of C == 0 is defined iff there is a defined 1 bit in C, or
    // C is fully defined: Si = !(C & ~Sc) && Sc, where Sc = Sa | Sb.
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *LHS = IRB.CreateICmpNE(Sc, Zero);
    Value *RHS =
        IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero);
    Value *Si = IRB.CreateAnd(LHS, RHS);
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// Instrument relational comparisons.
  ///
  /// Does exact shadow propagation for all relational comparisons of
  /// integers, pointers and vectors of those.
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits, and [b0, b1] that of B. Then (A cmp B) is
    // defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();

    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
      if (IsSigned) {
        // Sign-flip to map the signed range to the unsigned range; the
        // relation is preserved under getUnsignedPredicate().
        APInt MinVal =
            APInt::getSignedMinValue(V->getType()->getScalarSizeInBits());
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      }
      // Minimize/maximize the undefined bits.
      Value *Min = IRB.CreateAnd(V, IRB.CreateNot(S));
      Value *Max = IRB.CreateOr(V, S);
      return std::make_pair(Min, Max);
    };

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
    Value *S1 = IRB.CreateICmp(I.getUnsignedPredicate(), Amin, Bmax);
    Value *S2 = IRB.CreateICmp(I.getUnsignedPredicate(), Amax, Bmin);

    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
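  // Example of the interval trick: comparing A = 0b10?? (unknown low two
  // bits, so A ranges over [8, 11]) against the constant 12 with ult. Both
  // endpoints satisfy A < 12, so the comparison result is fully defined even
  // though A itself is partially poisoned.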
  /// Instrument signed relational comparisons.
  ///
  /// Handle sign bit tests (x<0, x>=0, x<=-1, x>-1) by propagating the
  /// highest bit of the shadow; delegate everything else to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp;
    Value *op = nullptr;
    CmpInst::Predicate pre;
    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
      op = I.getOperand(0);
      pre = I.getPredicate();
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
    } else {
      handleShadowOr(I);
      return;
    }

    if ((constOp->isNullValue() &&
         (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
        (constOp->isAllOnesValue() &&
         (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
      IRBuilder<> IRB(&I);
      Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
                                        "_msprop_icmp_s");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    if (isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  void handleFunnelShift(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S0 and S1.
    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S2 = getShadow(&I, 2);
    Value *S2Conv =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
    Value *V2 = I.getOperand(2);
    Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), S2Conv->getType(),
                                       {S0, S1, V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
  /// Instrument llvm.memmove.
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined, our interceptor will not
  /// kick in and we will lose the memmove. If we instrument the call here but
  /// it does not get inlined, we will memmove the shadow twice, which is bad
  /// for overlapping regions. So we simply lower the intrinsic to a call.
  /// The same applies to memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1)); // Ensure shadow initialized.
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemmoveFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1)); // Ensure shadow initialized.
    IRBuilder<> IRB(&I);
    IRB.CreateCall(MS.MemcpyFn,
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(
        MS.MemsetFn,
        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
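  // In other words (a summary, not additional behavior): the runtime's
  // __msan_memmove, __msan_memcpy and __msan_memset copy or fill both the
  // application bytes and the corresponding shadow (and origin) metadata in
  // one call, sidestepping the inlining ambiguity described above.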
  /// Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: write memory,
  /// have 1 pointer argument and 1 vector argument, return void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;

    // We don't know the pointer alignment (could be an unaligned SSE store),
    // so assume the worst case.
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
    IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
    return true;
  }

  /// Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: read memory,
  /// have 1 pointer argument, return a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      // Assume worst-case alignment, as above.
      const Align Alignment = Align(1);
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type, plus a specified number of trailing flags of
  /// any type.
  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy()))
      return false;

    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }
  /// Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly SIMD intrinsics.
  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)
      return false;

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))
        return true;

    return false;
  }

  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
      if (ClDumpHeuristicInstructions)
        dumpInst(I);

      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
                        << "\n");
      return true;
    }
    return false;
  }
  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleLifetimeStart(IntrinsicInst &I) {
    if (!PoisonStack)
      return;
    AllocaInst *AI = dyn_cast<AllocaInst>(I.getArgOperand(1));
    if (AI)
      LifetimeStartList.push_back(std::make_pair(&I, AI));
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    setShadow(&I, IRB.CreateIntrinsic(Intrinsic::bswap, ArrayRef(&OpType, 1),
                                      getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);

    Value *False = IRB.getInt1(false);
    Value *ConcreteZerosCount = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {Src, False});
    Value *ShadowZerosCount = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {SrcShadow, False});

        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    Value *NotAllZeroShadow =
        IRB.CreateIsNotNull(SrcShadow, "_mscz_shadow_not_null");
    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");

      OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");

    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  }
  /// Handle NEON vector convert intrinsics: the result is all-or-nothing per
  /// element, derived from the corresponding source element's shadow.
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *S0 = getShadow(&I, 0);

    Value *OutShadow = IRB.CreateSExt(IRB.CreateICmpNE(S0, getCleanShadow(S0)),
                                      getShadowTy(&I));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }

  /// For conversions that write only the low half of the output vector,
  /// compute the shadow over a narrower vector type.
  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {

  /// Widen a half-width shadow back to the full output width, with the upper
  /// (untouched) elements marked as fully initialized.
  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =
        cast<FixedVectorType>(Shadow->getType())->getNumElements();
    unsigned FullShadowNumElems =
        cast<FixedVectorType>(FullShadow->getType())->getNumElements();

    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
    } else {
      SmallVector<int, 16> ShadowMask(FullShadowNumElems);
      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
    }

  /// Propagate shadow through SSE-style vector conversions element-wise.
  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());

    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);

    IRBuilder<> IRB(&I);
    Value *S0 = getShadow(&I, 0);

    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);

    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);
  }
  /// Instrument SSE-style vector convert intrinsics with a "copy" operand.
  ///
  /// The lowest NumUsedElements of ConvertOp are converted and checked; the
  /// remaining result elements copy CopyOp's value and shadow.
  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
            isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

    if (CopyOp) {
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy =
          cast<VectorType>(ResultShadow->getType())->getElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }

  // Given a scalar or vector shadow, extend it to a full 64-bit "poisoned if
  // any bit is poisoned" shadow of type T.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);

    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {

    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }
  /// Instrument vector shift intrinsics (e.g., int_x86_avx2_psll_w): the
  /// shift amount operand may be a scalar, a vector, or an MMX-style i64.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift =
        IRB.CreateIntrinsic(I.getIntrinsicID(), V1->getType(),
                            {IRB.CreateBitCast(S1, V1->getType()), V2});
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  /// Get an MMX-sized (64-bit by default) vector type, or optionally other
  /// sized vectors used by some AVX intrinsics.
  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
    return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                                X86_MMXSizeInBits / EltSizeInBits);
  }

  /// Returns a signed counterpart for an (un)signed-saturate-and-pack
  /// intrinsic.
  Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
    switch (id) {
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;
  /// Instrument vector pack intrinsics (e.g., x86_mmx_packsswb), which pack
  /// elements of 2 input vectors into half as many bits with saturation.
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {
    assert(I.arg_size() == 2);
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());

    // SExt and ICmpNE below must apply to individual elements of the input
    // vectors; for x86mmx arguments, cast them to appropriate vector types
    // and back.
    Type *T =
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
      S1 = IRB.CreateBitCast(S1, T);
      S2 = IRB.CreateBitCast(S2, T);
    }
    Value *S1_ext =
        IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
    Value *S2_ext =
        IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
    if (MMXEltSizeInBits) {
      S1_ext = IRB.CreateBitCast(S1_ext, getMMXVectorTy(64));
      S2_ext = IRB.CreateBitCast(S2_ext, getMMXVectorTy(64));
    }

    Value *S = IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()),
                                   {S1_ext, S2_ext}, nullptr,
                                   "_msprop_vector_pack");
    if (MMXEltSizeInBits)
      S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Create a constant vector of all-1s/all-0s booleans from the bits of Mask.
  Constant *createDppMask(unsigned Width, unsigned Mask) {

  /// Calculate the output shadow as the set of poisoned output bits for a
  /// dpps/dppd instruction.
  Value *findDppPoisonedOutput(IRBuilder<> &IRB, Value *S, unsigned SrcMask,
                               unsigned DstMask) {
    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();

    Value *DstMaskV = createDppMask(Width, DstMask);

  void handleDppIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *S0 = getShadow(&I, 0);
    Value *S1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(S0, S1);

    const unsigned Width =
        cast<FixedVectorType>(S->getType())->getNumElements();
    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned Mask = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
    if (Width == 8) {
      // The first 4 elements of shadow are already calculated; 32-bit masks
      // let us calculate the second half at once.
      SI1 = IRB.CreateOr(
          SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    }

    setOriginForNaryOp(I);
  }

  Value *convertBlendvToSelectMask(IRBuilder<> &IRB, Value *C) {
    C = CreateAppToShadowCast(IRB, C);

  // `blendv(f, t, c)` is effectively `select(c[top_bit], t, f)`.
  void handleBlendvIntrinsic(IntrinsicInst &I) {
    Value *C = I.getOperand(2);
    Value *T = I.getOperand(1);
    Value *F = I.getOperand(0);

    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;

    {
      IRBuilder<> IRB(&I);
      // Extract the top bit from the condition and its shadow.
      C = convertBlendvToSelectMask(IRB, C);
      Sc = convertBlendvToSelectMask(IRB, Sc);
    }

    handleSelectLikeInst(I, C, T, F);
  }
  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    Type *ResTy = IsMMX ? IntegerType::get(*MS.C, 64) : I.getType();
    unsigned ZeroBitsPerResultElement =
        ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;

    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Instrument multiply-add intrinsics.
  ///
  /// e.g., two operands:
  ///         <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a, <8 x i16> %b)
  ///       three operands (the first is an accumulator):
  ///         <4 x i32> @llvm.x86.avx512.vpdpbusd.128
  ///                       (<4 x i32> %s, <4 x i32> %a, <4 x i32> %b)
  void handleVectorPmaddIntrinsic(IntrinsicInst &I, unsigned ReductionFactor,
                                  unsigned EltSizeInBits = 0) {
    IRBuilder<> IRB(&I);

    [[maybe_unused]] FixedVectorType *ReturnType =
        cast<FixedVectorType>(I.getType());

    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      Va = I.getOperand(0);
      Vb = I.getOperand(1);

      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {
      // Operand 0 is the accumulator; it is handled at the end.
      Va = I.getOperand(1);
      Vb = I.getOperand(2);

      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);
    }

    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =
          cast<FixedVectorType>(I.getOperand(0)->getType());
      assert(AccumulatorType == ReturnType);
    }

    FixedVectorType *ImplicitReturnType = ReturnType;

    if (EltSizeInBits) {
      ImplicitReturnType = cast<FixedVectorType>(
          getMMXVectorTy(EltSizeInBits * ReductionFactor,
                         ReturnType->getPrimitiveSizeInBits()));
    } else {
      assert(ParamType->getNumElements() ==
             ReturnType->getNumElements() * ReductionFactor);
    }

    // Multiplying an uninitialized value by a fully initialized zero yields
    // an initialized zero, so only count lanes where both shadow and value
    // can be non-zero.
    Value *And = IRB.CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});

    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    // The accumulator's shadow propagates to the output directly.
    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  }
  /// Instrument compare-packed intrinsics: basically an OR followed by
  /// sext(icmp ne 0), ending up with all-0s or all-1s shadow per element.
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *S = IRB.CreateSExt(
        IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Instrument compare-scalar intrinsics: handles both cmp* (result in the
  /// first vector element) and comi* (result as i32).
  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle horizontal reductions to scalar (e.g., vector.reduce.add): the
  /// result is fully defined iff all operand elements are.
  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle reductions with a starter value (e.g., vector.reduce.fadd):
  /// shadow = shadow(starter) | or-reduce(shadow(vector)).
  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = IRB.CreateOrReduce(getShadow(&I, 1));
    Value *S = IRB.CreateOr(Shadow0, Shadow1);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  /// Handle vector.reduce.or precisely: a result bit is defined if some
  /// element has a defined 1 at that bit, or if every element is defined
  /// there.
  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    Value *OutShadowMask = IRB.CreateAndReduce(OperandUnsetOrPoison);
    Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
    Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);

    setShadow(&I, S);
    setOrigin(&I, getOrigin(&I, 0));
  }

  /// Handle vector.reduce.and precisely: dually, a defined 0 anywhere makes
  /// the corresponding result bit a defined 0.
  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    Value *OutShadowMask = IRB.CreateAndReduce(OperandSetOrPoison);
    Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
    Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);

    setShadow(&I, S);
    setOrigin(&I, getOrigin(&I, 0));
  }
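  // Example for reduce.or (mirroring the scalar OR rule): reducing
  // <2 x i8> <0xFF, poison> yields a fully defined 0xFF, since every result
  // bit has a defined 1 somewhere; reducing <2 x i8> <0x0F, poison> leaves
  // the high four result bits poisoned.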
  void handleStmxcsr(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    Value *ShadowPtr =
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;

    IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);
  }

  void handleLdmxcsr(IntrinsicInst &I) {
    if (!InsertChecks)
      return;

    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Type *Ty = IRB.getInt32Ty();
    const Align Alignment = Align(1);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
                                    : getCleanOrigin();
    insertCheckShadow(Shadow, Origin, &I);
  }

  void handleMaskedExpandLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(0);
    Value *Mask = I.getArgOperand(1);
    Value *PassThru = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, false);

    Value *Shadow =
        IRB.CreateMaskedExpandLoad(ShadowTy, ShadowPtr, Align, Mask,
                                   getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);

    // TODO: Store origins.
    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedCompressStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    MaybeAlign Align = I.getParamAlign(1);
    Value *Mask = I.getArgOperand(2);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, true);

    IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Align, Mask);

    // TODO: Store origins.
  }
  void handleMaskedGather(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    Value *Mask = I.getArgOperand(2);
    Value *PassThru = I.getArgOperand(3);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Mask, &I);
      // Only check the enabled lanes' pointers.
      Value *MaskedPtrShadow = IRB.CreateSelect(
          Mask, getShadow(Ptrs), Constant::getNullValue(PtrsShadowTy),
          "_msmaskedptrs");
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);

    Value *Shadow =
        IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
                               getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);

    // TODO: Store origins.
    setOrigin(&I, getCleanOrigin());
  }

  void handleMaskedScatter(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
    Value *Mask = I.getArgOperand(3);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Mask, &I);
      Value *MaskedPtrShadow = IRB.CreateSelect(
          Mask, getShadow(Ptrs), Constant::getNullValue(PtrsShadowTy),
          "_msmaskedptrs");
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
    }

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
        getShadowTy(cast<VectorType>(Values->getType())->getElementType());
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, true);

    IRB.CreateMaskedScatter(Shadow, ShadowPtrs, Alignment, Mask);

    // TODO: Store origins.
  }
  void handleMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *V = I.getArgOperand(0);
    Value *Ptr = I.getArgOperand(1);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
    Value *Mask = I.getArgOperand(3);
    Value *Shadow = getShadow(V);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *ShadowPtr;
    Value *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);

    IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,
                DL.getTypeStoreSize(Shadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }

  void handleMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Ptr = I.getArgOperand(0);
    const Align Alignment(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    Value *Mask = I.getArgOperand(2);
    Value *PassThru = I.getArgOperand(3);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);
    }

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, false);
    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
                                       getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
      return;

    // Choose between PassThru's origin and the loaded origin, depending on
    // which lanes were masked off.

    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");

    setOrigin(&I, Origin);
  }
  void handleAVXMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    Value *Src = I.getArgOperand(2);

    const Align Alignment = Align(1);

    Value *SrcShadow = getShadow(Src);

    insertCheckShadowOf(Dst, &I);
    insertCheckShadowOf(Mask, &I);

    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    // The intrinsic may require floating-point, but shadows are arbitrary
    // bit patterns; bitcast so the intrinsic will copy them as-is.
    ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (!MS.TrackOrigins)
      return;

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }

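  // Instrument AVX/AVX2 maskload intrinsics: load the shadow by applying
  // the same intrinsic to the shadow of the source, with the original
  // integer-vector mask (whose shadow is checked first).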
  void handleAVXMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    const Align Alignment = Align(1);

    insertCheckShadowOf(Mask, &I);

    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore*/ false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }

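  // Check the shadow of an AVX permutation index vector. Only the low
  // log2(NumElements) bits of each index are used by the hardware, so only
  // the shadow of those bits needs to be fully initialized.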
  void maskedCheckAVXIndexShadow(IRBuilder<> &IRB, Value *Idx, Instruction *I) {
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize =
        cast<FixedVectorType>(Idx->getType())->getNumElements();

    auto *IdxShadow = getShadow(Idx);
    Value *Truncated = IRB.CreateTrunc(
        IdxShadow,
        FixedVectorType::get(Type::getIntNTy(*MS.C, Log2_64(IdxVectorSize)),
                             IdxVectorSize));
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }

  void handleAVXVpermilvar(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {Shadow, I.getArgOperand(1)});

    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }

  void handleAVXVpermi2var(IntrinsicInst &I) {
    assert(I.arg_size() == 3);
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    IRBuilder<> IRB(&I);
    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {AShadow, Idx, BShadow});

    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }

  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }

  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    IRBuilder<> IRB(&I);

    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }

    assert(isFixedFPVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    assert(Mask->getType()->isIntegerTy());
    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);

    assert(WriteThruNumElements == MaskNumElements);

    insertCheckShadowOf(Mask, &I);

    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // Ensure that the irrelevant upper bits of the mask are zero, so that
      // those lanes select from the (zeroed) shadow of A, not WriteThrough.
      Mask = IRB.CreateTrunc(Mask, IRB.getIntNTy(ANumElements));
      Mask = IRB.CreateZExt(Mask, IRB.getIntNTy(MaskNumElements));
    }

    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), MaskNumElements),
        "_ms_mask_bitcast");

    AShadow = IRB.CreateBitCast(AShadow, getShadowTy(&I), "_ms_a_shadow");

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow,
                                     "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleBmiIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);

    // If any bit of the mask operand is poisoned, the whole result is.
    Value *SMask = getShadow(&I, 1);
    SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
                           ShadowTy);
    // Apply the same intrinsic to the shadow of the first operand.
    Value *S = IRB.CreateCall(I.getCalledFunction(),
                              {getShadow(&I, 0), I.getOperand(1)});
    S = IRB.CreateOr(SMask, S);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

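  // Each output lane of pclmulqdq is a carry-less product of two 64-bit
  // halves selected by the immediate. Approximate the shadow by shuffling
  // the operand shadows so every output lane sees the shadow of the half
  // it was computed from, then OR the two contributions together.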
  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      Mask.append(2, X);
    }
    return Mask;
  }

  void handlePclmulIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    SOC.Done(&I);
  }

  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    // First element of second operand, remaining elements of first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleVtestIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *Or = IRB.CreateOr(Shadow0, Shadow1);
    Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
    Value *Scalar = convertShadowToScalar(NZ, IRB);
    Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    Value *OrShadow = IRB.CreateOr(First, Second);
    // First element of both OR'd together; remaining elements of first operand.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    assert(I.arg_size() == 2);
    assert(isa<ConstantInt>(I.getArgOperand(1)));

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    SC.Done(&I);
  }

  void handleAbsIntrinsic(IntrinsicInst &I) {
    assert(I.arg_size() == 2);
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());
    assert(IsIntMinPoison->getType()->isIntegerTy(1));

    IRBuilder<> IRB(&I);
    Value *SrcShadow = getShadow(Src);

    APInt MinVal =
        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
    Value *SrcIsMin = IRB.CreateICmp(CmpInst::ICMP_EQ, Src, MinValVec);

    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow =
        IRB.CreateSelect(SrcIsMin, PoisonedShadow, SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }

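  // The result of @llvm.is.fpclass depends on every bit of the operand, so
  // the result is poisoned iff any shadow bit of the operand is set.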
  void handleIsFpClass(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));

    Value *Shadow = PoisonValue::get(getShadowTy(&I));
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  // Returns the shadow of the first (lowest) element of V.
  Value *extractLowerShadow(IRBuilder<> &IRB, Value *V) {
    Value *Shadow = getShadow(V);
    return IRB.CreateExtractElement(Shadow,
                                    ConstantInt::get(IRB.getInt32Ty(), 0));
  }

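  // Instrument AVX512 masked down-convert intrinsics: the shadow is
  // truncated along with the value and then selected lane-wise against the
  // writethrough operand's shadow, using the (checked) mask.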
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->isIntegerTy());
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);

    if (ANumElements != OutputNumElements) {
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
                            "_ms_widen_mask");
    }

    Value *AShadow = getShadow(A);
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);

    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);

    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow);
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedFPVector(A));
    assert(isFixedFPVector(WriteThrough));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    assert(Mask->getType()->isIntegerTy());
    insertCheckShadowOf(Mask, &I);

    // Some bits of the mask may be unused (e.g., a <4 x float> operation
    // with an i8 mask); drop them before the bitcast.
    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);

    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));

    Value *AShadow = getShadow(A);
    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow);
    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }

  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 4);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);

    insertCheckShadowOf(Mask, &I);

    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    assert(WriteThrough->getType() == A->getType());
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);

    // Only the lowest element is computed; the rest pass through from A.
    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);

    Value *MaskLower = IRB.CreateTrunc(Mask, IRB.getInt1Ty());

    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
        "_msprop");

    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }

  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *X = I.getOperand(1);
    Value *B = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(cast<VectorType>(A->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);

    assert(A->getType() == X->getType());

    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);

    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    // Apply the affine transform to the shadows of A and X (with a clean B),
    // so that an uninitialized bit in either matrix taints the output lanes
    // it can influence.
    CallInst *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    CallInst *AShadowX = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {X, AShadow, BZeroShadow});
    CallInst *XShadowA = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, A, BZeroShadow});

    unsigned NumElements =
        cast<FixedVectorType>(I.getType())->getNumElements();
    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    // Broadcast the (scalar) shadow of B to every element of the output.
    for (unsigned i = 0; i < NumElements; i++)
      BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }

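  // Instrument NEON structured loads (ld1x2..ld1x4, ld2..ld4, ld2r..ld4r and
  // their lane variants) by issuing the same load from shadow memory. For the
  // lane variants, the lane number must be fully initialized.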
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();

    // Return type is a struct of vectors.
    assert(I.getType()->isStructTy());

    if (WithLane)
      assert(4 <= numArgs && numArgs <= 6);

    IRBuilder<> IRB(&I);
    SmallVector<Value *, 6> ShadowArgs;

    if (WithLane) {
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      ShadowArgs.push_back(LaneNumber);

      // The lane number must be fully initialized.
      insertCheckShadowOf(LaneNumber, &I);
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore*/ false);
    ShadowArgs.push_back(SrcShadowPtr);

    CallInst *CI =
        IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, CI);

    if (!MS.TrackOrigins)
      return;

    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }

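  // Instrument NEON vector stores (st1x2..st1x4, st2..st4 and their lane
  // variants) by calling the same intrinsic on the shadows of the stored
  // vectors, writing to the shadow of the destination.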
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    IRBuilder<> IRB(&I);

    // Don't use getNumOperands() because it includes the callee.
    int numArgOperands = I.arg_size();

    // The last arg operand is the output (pointer).
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    assert(Addr->getType()->isPointerTy());
    int skipTrailingOperands = 1;

    insertCheckShadowOf(Addr, &I);

    // Second-last operand is the lane number (for vst{2,3,4}lane).
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      assert(isa<IntegerType>(
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
    }

    SmallVector<Value *, 8> ShadowArgs;
    // All the initial operands are the inputs.
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }

    // The total output size is the sum of the input vector sizes.
    FixedVectorType *OutputVectorTy = FixedVectorType::get(
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements() *
            (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(1,
                        I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // AArch64 NEON does not need alignment (unless OS requires it).
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
    ShadowArgs.append(1, OutputShadowPtr);

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (MS.TrackOrigins) {
      // TODO: if we modelled the vst* instruction more precisely, we could
      // more accurately track the origins (e.g., if both inputs are
      // uninitialized, we could propagate the origin of either input,
      // instead of the second input).
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }

  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID shadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    IRBuilder<> IRB(&I);

    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    // Don't use getNumOperands() because it includes the callee.
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);

      // Shadows are integer-ish types but some intrinsics require a
      // different (e.g., floating-point) type.
      ShadowArgs.push_back(
          IRB.CreateBitCast(Shadow, I.getArgOperand(i)->getType()));
    }

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Arg = I.getArgOperand(i);
      ShadowArgs.push_back(Arg);
    }

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // Combine the computed shadow with the shadow of the trailing args.
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }

    setShadow(&I, IRB.CreateBitCast(CombinedShadow, getShadowTy(&I)));

    setOriginForNaryOp(I);
  }

  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    handleShadowOr(I);
  }

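  // Handle intrinsics that are cross-platform (or the generalized versions
  // of target-specific intrinsics). Returns true if the intrinsic was
  // handled; otherwise it falls through to the target-specific handlers.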
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/0);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, /*AllowShadowCast=*/false);
      break;
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      handleShadowOr(I);
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }

  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;
    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;
    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }
    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
      break;
    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }
    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/true);
      break;
    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /*Variable=*/true);
      break;
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;
    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;
    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;
    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;
    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorPmaddIntrinsic(I, 2);
      break;
    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 2, 8);
      break;
    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 2, 16);
      break;
    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx10_vpdpbssds_512:
      handleVectorPmaddIntrinsic(I, 4, 8);
      break;
    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
      handleVectorPmaddIntrinsic(I, 2, 16);
      break;
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;
    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;
    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;
    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;
    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;
    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;
    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
    case Intrinsic::x86_avx2_phsub_w: {
      handlePairwiseShadowOrIntrinsic(I, 16);
      break;
    }
    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
    case Intrinsic::x86_avx2_phsub_d: {
      handlePairwiseShadowOrIntrinsic(I, 32);
      break;
    }
    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
    case Intrinsic::x86_avx2_phsub_sw: {
      handlePairwiseShadowOrIntrinsic(I, 16);
      break;
    }
    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256: {
      handlePairwiseShadowOrIntrinsic(I);
      break;
    }
    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }
    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }
    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // These AVX512 variants carry the rounding mode as a trailing flag.
      [[maybe_unused]] bool Success =
          maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }
    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_dw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_db_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qb_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      handleAVX512VectorDownConvert(I);
      break;
    }
    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I);
      break;
    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I);
      break;
    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }
    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;
    default:
      return false;
    }
    return true;
  }

  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    case Intrinsic::aarch64_neon_addp:
    case Intrinsic::aarch64_neon_faddp:
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I);
      break;
    }
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtxn: {
      handleNEONVectorConvertIntrinsic(I);
      break;
    }
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, /*AllowShadowCast=*/true);
      break;
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, /*WithLane=*/false);
      break;
    }
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, /*WithLane=*/true);
      break;
    }
    // Saturating extract narrow.
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      handleShadowOr(I);
      break;
    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/false);
      break;
    }
    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/true);
      break;
    }
    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      handleIntrinsicByApplyingToShadow(
          I, I.getIntrinsicID(), /*trailingVerbatimArgs=*/1);
      break;
    }
    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }
    default:
      return false;
    }
    return true;
  }

  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }

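  // libatomic.a functions cannot be intercepted and the library is not
  // compiled with instrumentation, so their shadow effects are modeled
  // here. The call's ordering is strengthened so the shadow copy performed
  // around the call cannot be reordered across it.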
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have a terminator.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure
    // the shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }

  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure
    // the shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    // Atomic store always paints clean shadow/origin. See file header.
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }

  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean. Note that any side effects of the inline asm
      // that are not immediately visible in its constraints are not handled.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need special handling because there isn't a
      // good way to intercept them or compile the library with
      // instrumentation.
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us.
      // To prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);

      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction()) {
        Func->removeFnAttrs(B);
      }
    }
    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so don't check them eagerly.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();

      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        [[maybe_unused]] Value *Store = nullptr;
        // Compute the shadow for arg even if it is ByVal, because if it is a
        // ByVal argument, we copy the shadow of its contents to a TLS slot.
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a
          // single load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment,
                               alignTo(Size, kMinOriginAlignment));
            }
          }
        } else {
          // Any other parameters mean we need bit-grained tracking of
          // uninit data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
      }
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!CB.getType()->isSized())
      return;

    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      // FIXME: NextInsn is likely in a basic block that has not been visited
      // yet. Anything inserted there will be instrumented by MSan later!
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
        kShadowTLSAlignment, "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }

  bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: consider a SpecialCaseList for functions that must always
    // return fully initialized values. For now, we hardcode "main".
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass
    // our check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }

  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false, GlobalValue::PrivateLinkage,
                              IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    return createPrivateConstGlobalForString(*F.getParent(), I.getName());
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }

  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the
    // corresponding llvm.lifetime.start.
    AllocaSet.insert(&I);
  }

  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned.
      // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.

      // Cast arguments to shadow-compatible type.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);

      // Result shadow if condition shadow is 1.
      Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See https://github.com/google/sanitizers/issues/504
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    // Nothing to do here.
  }

  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // For each assembly argument, we check its value for being initialized.
    // If the argument is a pointer, we assume it points to a single element
    // of the corresponding type (or to a 8-byte word, if the type is
    // unsized). Each such pointer is instrumented with a call to the runtime
    // library.
    Type *OpType = Operand->getType();
    // Check the operand value itself.
    insertCheckShadowOf(Operand, &I);
    if (!OpType->isPointerTy() || !isOutput) {
      assert(!isOutput);
      return;
    }
    if (!ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy, derived from elementtype(), does not encode the alignment of
      // the pointer. Conservatively assume that the shadow memory is
      // unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }

  void visitAsmInstruction(Instruction &I) {
    // Conservative inline assembly handling: check for poisoned shadow of
    // asm() arguments, then unpoison the result and all the memory locations
    // pointed to by those arguments.
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    const DataLayout &DL = F.getDataLayout();
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Doing so before unpoisoning output arguments,
    // so that we won't overwrite uninit values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      Type *ElemTy = CB->getParamElementType(i);
      instrumentAsmArgument(Operand, ElemTy, I, IRB, DL, /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual InlineAsm
    // call, so that the shadow for memory published in the asm() statement
    // remains valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      Type *ElemTy = CB->getParamElementType(i);
      instrumentAsmArgument(Operand, ElemTy, I, IRB, DL, /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

struct VarArgHelperBase : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    // Make sure we don't overflow __msan_va_arg_tls.
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // The tails of __msan_va_arg_tls is not large enough to fit full
    // value shadow, but it will be copied to backup anyway. Make it clean.
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};

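/// AMD64-specific implementation of VarArgHelper. Shadows of variadic
/// arguments are written to __msan_va_arg_tls using the same gp/fp/overflow
/// layout as the System V register save area, so that the shadow of each
/// va_arg can later be located by offset.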
struct VarArgAMD64Helper : public VarArgHelperBase {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        // Fixed arguments passed through the overflow area will be stepped
        // over by va_start, so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy the shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) =
            MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                   kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          break;
        case AK_Memory: {
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy the shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
        }
        }
        // Take fixed arguments into account for GpOffset and FpOffset,
        // but don't actually store shadows for them.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          MS.PtrTy);
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                         VAArgOverflowSize);
      }
    }
  }
};

7420struct VarArgAArch64Helper :
public VarArgHelperBase {
7421 static const unsigned kAArch64GrArgSize = 64;
7422 static const unsigned kAArch64VrArgSize = 128;
7424 static const unsigned AArch64GrBegOffset = 0;
7425 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
7427 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
7428 static const unsigned AArch64VrEndOffset =
7429 AArch64VrBegOffset + kAArch64VrArgSize;
7430 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
7432 AllocaInst *VAArgTLSCopy =
nullptr;
7433 Value *VAArgOverflowSize =
nullptr;
7435 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
7437 VarArgAArch64Helper(Function &
F, MemorySanitizer &MS,
7438 MemorySanitizerVisitor &MSV)
7439 : VarArgHelperBase(
F, MS, MSV, 32) {}
7442 std::pair<ArgKind, uint64_t> classifyArgument(
Type *
T) {
7443 if (
T->isIntOrPtrTy() &&
T->getPrimitiveSizeInBits() <= 64)
7444 return {AK_GeneralPurpose, 1};
7445 if (
T->isFloatingPointTy() &&
T->getPrimitiveSizeInBits() <= 128)
7446 return {AK_FloatingPoint, 1};
7448 if (
T->isArrayTy()) {
7449 auto R = classifyArgument(
T->getArrayElementType());
7450 R.second *=
T->getScalarType()->getArrayNumElements();
7455 auto R = classifyArgument(FV->getScalarType());
7456 R.second *= FV->getNumElements();
7461 return {AK_Memory, 0};
7473 void visitCallBase(CallBase &CB,
IRBuilder<> &IRB)
override {
7474 unsigned GrOffset = AArch64GrBegOffset;
7475 unsigned VrOffset = AArch64VrBegOffset;
7476 unsigned OverflowOffset = AArch64VAEndOffset;
7478 const DataLayout &
DL =
F.getDataLayout();
7481 auto [AK, RegNum] = classifyArgument(
A->getType());
7482 if (AK == AK_GeneralPurpose &&
7483 (GrOffset + RegNum * 8) > AArch64GrEndOffset)
7485 if (AK == AK_FloatingPoint &&
7486 (VrOffset + RegNum * 16) > AArch64VrEndOffset)
7490 case AK_GeneralPurpose:
7491 Base = getShadowPtrForVAArgument(IRB, GrOffset);
7492 GrOffset += 8 * RegNum;
7494 case AK_FloatingPoint:
7495 Base = getShadowPtrForVAArgument(IRB, VrOffset);
7496 VrOffset += 16 * RegNum;
7503 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
7504 uint64_t AlignedSize =
alignTo(ArgSize, 8);
7505 unsigned BaseOffset = OverflowOffset;
7506 Base = getShadowPtrForVAArgument(IRB, BaseOffset);
7507 OverflowOffset += AlignedSize;
7510 CleanUnusedTLS(IRB,
Base, BaseOffset);
7522 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
7523 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
7530 ConstantInt::get(MS.IntptrTy, offset)),
7532 return IRB.
CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
7539 ConstantInt::get(MS.IntptrTy, offset)),
7542 return IRB.
CreateSExt(SaveArea32, MS.IntptrTy);
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start: copy va_arg shadow from the backup copy of the
    // TLS contents into the three save areas of the va_list.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // The offsets are negative: only the shadow of the still-unconsumed
      // register arguments needs to be copied.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // The same for the FP/SIMD save area.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally the overflow (stack) area.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
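// All of the VarArg*Helper classes above and below share one scheme:
// visitCallBase() mirrors the shadow of every variadic argument into the
// __msan_va_arg_tls buffer at the ABI-mandated offset, and
// finalizeInstrumentation() snapshots that buffer in the prologue and, after
// each va_start, memcpys the snapshot over the shadow of the va_list's save
// areas. A C-level sketch of why this is needed (not code from this file):
//
//   int sum(int n, ...) {
//     va_list ap;
//     va_start(ap, n);          // shadow of the register/stack save areas
//                               // is populated here from the TLS snapshot
//     int x = va_arg(ap, int);  // reading x picks up the caller's shadow,
//                               // so an uninitialized argument is reported
//     ...
//   }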
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // The parameter save area starts 48 bytes from the frame pointer under
    // the ELFv1 ABI and 32 bytes under ELFv2.
    Triple TargetTriple(F.getParent()->getTargetTriple());
    unsigned VAArgBase;
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the element size, except for ppc_fp128
          // elements, which stay 8-byte aligned.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // On big-endian targets an argument shorter than 8 bytes occupies
          // the high end of its slot; shift the shadow to match.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(8));
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it carries the
    // total size of all variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the entry block, since intervening calls
      // overwrite the TLS contents.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy va_arg shadow from the backup into the
    // argument save area.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      // On PPC64 the va_list is a plain pointer to the argument area.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
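// For reference, the SVR4 PPC32 va_list tag is a struct
//   { char gpr; char fpr; short reserved;
//     char *overflow_arg_area; char *reg_save_area; }
// so the overflow-area pointer lives at byte offset 4 and the register save
// area pointer at byte offset 8, which is what finalizeInstrumentation()
// of the helper below reads.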
/// PowerPC32-specific implementation of VarArgHelper.
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase = 0; // NB: the original initializer was elided in
                            // this listing; 0 is an assumption.
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          Type *ElementTy = ArgTy->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Arguments shorter than a register occupy the high end of their
          // slot on big-endian PPC; shift the shadow to match.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
      }
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS: the total size of all
    // variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
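  // Worked example of the big-endian adjustment above: for a 1-byte vararg
  // on 32-bit PPC (IntptrSize == 4), the value sits in the high byte of its
  // 4-byte slot, so the shadow offset is advanced by 4 - 1 = 3 bytes before
  // the store.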
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the entry block, since intervening calls
      // overwrite the TLS contents.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: split the copied shadow between the register
    // save area and the overflow area.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;

      // The reg_save_area pointer is stored at offset 8 in the va_list tag.
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));

      // The register save area holds at most 32 bytes of GPR arguments.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));

      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);

      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);

      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);

      // Clear the 32 bytes of shadow that follow the GPR slots. (The exact
      // pointer-advance expression was elided in the original listing; the
      // surviving operands imply an advance by 32 bytes.)
      RegSaveAreaShadowPtr = IRB.CreateInBoundsPtrAdd(
          RegSaveAreaShadowPtr, ConstantInt::get(MS.IntptrTy, 32));
      IRB.CreateMemSet(RegSaveAreaShadowPtr, IRB.getInt8(0),
                       ConstantInt::get(MS.IntptrTy, 32), Alignment);

      // Whatever shadow did not fit in the register save area spills into
      // the overflow area, whose pointer is at offset 4 in the va_list tag.
      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);

      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);

      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);

      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
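// SystemZ ABI background for the constants below: the 160-byte register
// save area of the caller's frame holds the argument GPRs r2-r6 at offsets
// 16-56 and the argument FPRs f0/f2/f4/f6 at offsets 128-160; overflow
// arguments start right after it, at offset 160.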
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(
            F.getFnAttribute("use-soft-float").getValueAsBool()) {}

  ArgKind classifyArgument(Type *T) {
    // i128 and fp128 are passed indirectly; under the soft-float ABI,
    // floating-point values travel in general-purpose registers.
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // Integer arguments narrower than 64 bits are widened to a full register
    // with sign or zero extension; the shadow must be extended the same way.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt))
      return ShadowExtension::Zero;
    if (CB.paramHasAttr(ArgNo, Attribute::SExt))
      return ShadowExtension::Sign;
    return ShadowExtension::None;
  }
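  // Example of the extension rule above: a variadic 'int' passed with the
  // signext attribute occupies a full 64-bit slot, so its 32-bit shadow is
  // sign-extended to 64 bits before being stored (see the CreateShadowCast
  // call in visitCallBase below). Without an extension attribute the shadow
  // keeps its natural size and the remaining GapSize bytes of the slot are
  // simply skipped.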
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        // Indirect arguments are passed as a pointer in a GPR.
        T = MS.PtrTy;
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Vector varargs were demoted to Memory above; only fixed vector
        // arguments consume a vector register index.
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Ignore fixed args: only the vararg portion of the overflow area
        // shadow needs to be copied.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // Under the soft-float ABI only the GPR part of the save area is live.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
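// Among the va_arg helpers visible here, only VarArgSystemZHelper also
// mirrors origins (VAArgTLSOriginCopy / MS.VAArgOriginTLS) alongside the
// shadow when -msan-track-origins is enabled; the AMD64 helper, defined
// earlier in this file, does so as well.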
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Small arguments occupy the high end of their slot on big-endian
          // targets; shift the shadow accordingly.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS: the total size of all
    // variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
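  // On i386 every variadic argument is passed on the stack, so a single
  // flat region of __msan_va_arg_tls mirrors the argument area, and the
  // va_list tag is just a 4-byte pointer into that area.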
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the entry block, since intervening calls
      // overwrite the TLS contents.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the shadow backup over the argument area.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
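/// Implementation of VarArgHelper shared by ARM32, MIPS, RISC-V and
/// LoongArch64 (see the aliases after this struct). It assumes every
/// variadic argument is passed in an IntptrSize-aligned stack slot, which
/// is why each target only needs to supply its va_list tag size.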
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Small arguments occupy the high end of their slot on big-endian
        // targets; shift the shadow accordingly.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS: the total size of all
    // variadic arguments.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // Back up va_arg_tls in the entry block, since intervening calls
      // overwrite the TLS contents.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start: copy the shadow backup over the argument area.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
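// These aliases differ only in the VAListTagSize passed to the constructor
// in CreateVarArgHelper below (4 bytes for the 32-bit targets, 8 for the
// 64-bit ones).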
/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
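// Targets not matched above fall back to VarArgNoOpHelper: no va_arg shadow
// is propagated for them, so uninitialized varargs may go unreported there.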
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;
  MemorySanitizerVisitor Visitor(F, *this, TLI);
  // Clear memory attributes: instrumented code also reads/writes shadow.
  AttributeMask B;
  B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
  F.removeFnAttrs(B);
  return Visitor.runOnFunction();
}