using namespace PatternMatch;

#define DEBUG_TYPE "dse"
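// A minimal illustration of the transform this file implements: the first
// store below is dead because the second one overwrites the same location
// before any intervening read, so DSE deletes it.
//
//   store i32 1, ptr %p   ; dead, removed by DSE
//   store i32 2, ptr %p   ; killing store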
STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");
105 "Number of times a valid candidate is returned from getDomMemoryDef");
107 "Number iterations check for reads in getDomMemoryDef");
110 "Controls which MemoryDefs are eliminated.");
115 cl::desc(
"Enable partial-overwrite tracking in DSE"));
120 cl::desc(
"Enable partial store merging in DSE"));
124 cl::desc(
"The number of memory instructions to scan for "
125 "dead store elimination (default = 150)"));
128 cl::desc(
"The maximum number of steps while walking upwards to find "
129 "MemoryDefs that may be killed (default = 90)"));
133 cl::desc(
"The maximum number candidates that only partially overwrite the "
134 "killing MemoryDef to consider"
139 cl::desc(
"The number of MemoryDefs we consider as candidates to eliminated "
140 "other stores per basic block (default = 5000)"));
145 "The cost of a step in the same basic block as the killing MemoryDef"
151 cl::desc(
"The cost of a step in a different basic "
152 "block than the killing MemoryDef"
157 cl::desc(
"The maximum number of blocks to check when trying to prove that "
158 "all paths to an exit go through a killing block (default = 50)"));
168 cl::desc(
"Allow DSE to optimize memory accesses."));
173 cl::desc(
"Enable the initializes attr improvement in DSE"));
/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      // Do shorten memory intrinsics.
      return true;
    }
  }
  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // Handle only memsets for now; shortening from the front moves the
  // destination pointer, which a memset tolerates.
  return isa<AnyMemSetInst>(I);
}
enum OverwriteResult {
  OW_Begin, OW_Complete, OW_End, OW_PartialEarlierWithFullLater,
  OW_MaybePartial, OW_None, OW_Unknown
};
/// Check if two instructions are masked stores that completely overwrite one
/// another. Returns OW_Complete if they do, OW_Unknown otherwise.
static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
                                              const Instruction *DeadI,
                                              BatchAAResults &AA) {
  const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
  const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
  if (KillingII == nullptr || DeadII == nullptr)
    return OW_Unknown;
  if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
    return OW_Unknown;

  switch (KillingII->getIntrinsicID()) {
  case Intrinsic::masked_store:
  case Intrinsic::vp_store: {
    const DataLayout &DL = KillingI->getDataLayout();
    // The stored values must have the same size...
    auto *KillingTy = KillingII->getArgOperand(0)->getType();
    auto *DeadTy = DeadII->getArgOperand(0)->getType();
    if (DL.getTypeSizeInBits(KillingTy) != DL.getTypeSizeInBits(DeadTy))
      return OW_Unknown;
    // ... and the same element count.
    if (cast<VectorType>(KillingTy)->getElementCount() !=
        cast<VectorType>(DeadTy)->getElementCount())
      return OW_Unknown;
    // The pointers must be identical or must-alias.
    Value *KillingPtr = KillingII->getArgOperand(1);
    Value *DeadPtr = DeadII->getArgOperand(1);
    if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr))
      return OW_Unknown;
    if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
      // The masks must be identical.
      if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
        return OW_Unknown;
    } else if (KillingII->getIntrinsicID() == Intrinsic::vp_store) {
      // The masks and the vector lengths must be identical.
      if (KillingII->getArgOperand(2) != DeadII->getArgOperand(2))
        return OW_Unknown;
      if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
        return OW_Unknown;
    }
    return OW_Complete;
  }
  default:
    return OW_Unknown;
  }
}
/// Return 'OW_Complete' if a store to the 'KillingLoc' location completely
/// overwrites a store to the 'DeadLoc' location; return a partial-overwrite
/// result otherwise. Partial overlaps are accumulated per dead instruction in
/// \p IOL so that several killing stores can together prove a dead store
/// fully overwritten.
static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc,
                                          const MemoryLocation &DeadLoc,
                                          int64_t KillingOff, int64_t DeadOff,
                                          Instruction *DeadI,
                                          InstOverlapIntervalsTy &IOL) {
  const uint64_t KillingSize = KillingLoc.Size.getValue();
  const uint64_t DeadSize = DeadLoc.Size.getValue();
  // We may now overlap, although the overlap is not complete. There might
  // also be other incomplete overlaps, and together, they might cover the
  // complete dead store.
  if (EnablePartialOverwriteTracking &&
      KillingOff < int64_t(DeadOff + DeadSize) &&
      int64_t(KillingOff + KillingSize) >= DeadOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DeadI];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
                      << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
                      << KillingOff << ", " << int64_t(KillingOff + KillingSize)
                      << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key and the beginning offset as the value.
    int64_t KillingIntStart = KillingOff;
    int64_t KillingIntEnd = KillingOff + KillingSize;

    // Find any intervals ending at, or after, KillingIntStart which start
    // before KillingIntEnd.
    auto ILI = IM.lower_bound(KillingIntStart);
    if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
      // This existing interval overlaps the current store somewhere in
      // [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing
      // interval and adjusting our start and end.
      KillingIntStart = std::min(KillingIntStart, ILI->second);
      KillingIntEnd = std::max(KillingIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
        assert(ILI->second > KillingIntStart && "Unexpected interval");
        KillingIntEnd = std::max(KillingIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[KillingIntEnd] = KillingIntStart;

    ILI = IM.begin();
    if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
                        << DeadOff << ", " << int64_t(DeadOff + DeadSize)
                        << ") Composite KillingLoc [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for a dead store which writes to all the memory locations that the
  // killing store writes to.
  if (EnablePartialStoreMerging && KillingOff >= DeadOff &&
      int64_t(DeadOff + DeadSize) > KillingOff &&
      uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff
                      << ", " << int64_t(DeadOff + DeadSize)
                      << ") by a killing store [" << KillingOff << ", "
                      << int64_t(KillingOff + KillingSize) << ")\n");
    return OW_PartialEarlierWithFullLater;
  }

  // The killing store overwrites the end of the dead store:
  //      |--dead--|
  //                |--killing--|
  if (!EnablePartialOverwriteTracking &&
      (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
       int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
    return OW_End;

  // The killing store overwrites the beginning of the dead store:
  //                |--dead--|
  //      |--killing--|
  if (!EnablePartialOverwriteTracking &&
      (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
    assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
/// Returns true if the memory which is accessed by the second instruction is
/// not modified between the first and the second instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       BatchAAResults &AA,
                                       const DataLayout &DL,
                                       DominatorTree *DT) {
  // Do a backwards scan through the CFG from SecondI to FirstI. While doing
  // the walk, keep track of the address to check, which may differ between
  // basic blocks due to PHI translation.
  using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
  // ... (worklist of BlockAddressPair, Visited map, block iterators)
  MemoryLocation MemLoc;
  if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
    MemLoc = MemoryLocation::getForDest(MemSet);
  else
    MemLoc = MemoryLocation::get(SecondI);

  auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
  // ... (seed the worklist with SecondI's block)
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the FirstBB.
  while (!WorkList.empty()) {
    // ... (pop Current; compute BI/EI bounds for the block)
    if (isFirstBlock) {
      // Ignore instructions after SecondI on the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      isFirstBlock = false;
    }
    // ...
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SI must be dominated by LI");
      for (BasicBlock *Pred : predecessors(B)) {
        // PHI-translate the address into the predecessor; bail out if a block
        // is revisited with a different address.
        // ... (compute PredAddr/TranslatedPtr via PHITransAddr)
        auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
        if (!Inserted.second) {
          // Already visited this block with a different address: bail out.
          if (TranslatedPtr != Inserted.first->second)
            return false;
          continue;
        }
        WorkList.push_back(std::make_pair(Pred, PredAddr));
      }
    }
  }
  return true;
}
/// Update the debug-info assignment markers attached to \p Inst, which is
/// about to be shortened, so they describe only the bits that remain written.
static void shortenAssignment(Instruction *Inst, Value *OriginalDest,
                              uint64_t OldOffsetInBits, uint64_t OldSizeInBits,
                              uint64_t NewSizeInBits, bool IsOverwriteEnd) {
  // Compute the bit range that is no longer written by the shortened store.
  uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits;
  uint64_t DeadSliceOffsetInBits =
      OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0);
  auto SetDeadFragExpr = [](auto *Assign,
                            DIExpression::FragmentInfo DeadFragment) {
    // Put the new fragment info into the expression if possible.
    uint64_t RelativeOffset = DeadFragment.OffsetInBits -
                              Assign->getExpression() /* ... fragment base */;
    if (auto NewExpr = DIExpression::createFragmentExpression(
            Assign->getExpression(), RelativeOffset,
            DeadFragment.SizeInBits)) {
      Assign->setExpression(*NewExpr);
      return;
    }
    // Failed to create a fragment expression, so discard the value and turn
    // this into a kill location.
    auto *Expr = *DIExpression::createFragmentExpression(
        DIExpression::get(Assign->getContext(), {}), DeadFragment.OffsetInBits,
        DeadFragment.SizeInBits);
    Assign->setExpression(Expr);
    Assign->setKillLocation();
  };
  // ... (set up Ctx and a shared "link to nothing" DIAssignID)
  auto GetDeadLink = [&Ctx, &LinkToNothing]() {
    // ...
    return LinkToNothing;
  };
  // For each assignment marker attached to Inst:
  // ...
    std::optional<DIExpression::FragmentInfo> NewFragment;
    // ... (calculateFragmentIntersect(..., DeadSliceOffsetInBits,
    //      DeadSliceSizeInBits, Assign, NewFragment))
    Assign->setKillAddress();
    Assign->setAssignId(GetDeadLink());
    // ...
    if (NewFragment->SizeInBits == 0)
      continue;

    // Clone the marker so it covers the now-dead fragment.
    auto *NewAssign = static_cast<decltype(Assign)>(Assign->clone());
    NewAssign->insertAfter(Assign->getIterator());
    NewAssign->setAssignId(GetDeadLink());
    SetDeadFragExpr(NewAssign, *NewFragment);
    NewAssign->setKillAddress();
  // ...
}
/// Update the attributes of a shortened memory intrinsic's pointer argument,
/// given that the dereferenced pointer may have been moved by \p PtrOffset.
static void adjustArgAttributes(AnyMemIntrinsic *Intrinsic, unsigned ArgNo,
                                uint64_t PtrOffset) {
  // Remember old attributes.
  AttributeSet OldAttrs = Intrinsic->getParamAttributes(ArgNo);

  // Find attributes that should be kept, and remove the rest.
  AttributeMask AttrsToRemove;
  for (auto &Attr : OldAttrs) {
    if (Attr.hasKindAsEnum()) {
      switch (Attr.getKindAsEnum()) {
      default:
        break;
      case Attribute::Alignment:
        // Only keep the alignment if PtrOffset is a multiple of it.
        if (isAligned(Attr.getAlignment().valueOrOne(), PtrOffset))
          continue;
        break;
      case Attribute::Dereferenceable:
      case Attribute::DereferenceableOrNull:
        // We could reduce the size of these attributes according to
        // PtrOffset, but we simply drop them for now.
        break;
      case Attribute::NonNull:
      case Attribute::NoUndef:
        continue;
      }
    }
    AttrsToRemove.addAttribute(Attr);
  }

  // Remove the attributes that should be dropped.
  Intrinsic->removeParamAttrs(ArgNo, AttrsToRemove);
}
static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
                         uint64_t &DeadSize, int64_t KillingStart,
                         uint64_t KillingSize, bool IsOverwriteEnd) {
  auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
  Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
  // ...
  int64_t ToRemoveStart = 0;
  uint64_t ToRemoveSize = 0;
  // Compute start and size of the region to remove. Make sure 'PrefAlign' is
  // maintained on the remaining store.
  if (IsOverwriteEnd) {
    // Calculate the required adjustment for 'KillingStart' to keep the size
    // of the remaining store aligned on 'PrefAlign'.
    uint64_t Off =
        offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
    ToRemoveStart = KillingStart + Off;
    if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
      return false;
    ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
  } else {
    ToRemoveStart = DeadStart;
    assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
           "Not overlapping accesses?");
    ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
    // Calculate the required adjustment for 'ToRemoveSize' to keep the start
    // of the remaining store aligned on 'PrefAlign'.
    uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
    if (Off != 0) {
      if (ToRemoveSize <= (PrefAlign.value() - Off))
        return false;
      ToRemoveSize -= PrefAlign.value() - Off;
    }
    assert(isAligned(PrefAlign, ToRemoveSize) &&
           "Should preserve selected alignment");
  }

  assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
  assert(DeadSize > ToRemoveSize && "Can't remove more than original size");

  uint64_t NewSize = DeadSize - ToRemoveSize;
  if (DeadIntrinsic->isAtomic()) {
    // When shortening an atomic memory intrinsic, the newly shortened length
    // must remain an integer multiple of the element size.
    const uint32_t ElementSize = DeadIntrinsic->getElementSizeInBytes();
    if (0 != NewSize % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
                    << "\n  KILLER [" << ToRemoveStart << ", "
                    << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");

  DeadIntrinsic->setLength(NewSize);
  DeadIntrinsic->setDestAlignment(PrefAlign);

  Value *OrigDest = DeadIntrinsic->getRawDest();
  if (!IsOverwriteEnd) {
    // Advance the destination pointer past the removed prefix.
    Value *Indices[1] = {
        ConstantInt::get(DeadIntrinsic->getLength()->getType(), ToRemoveSize)};
    // ... (create an inbounds i8 GEP 'NewDestGEP' over OrigDest)
    NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
    DeadIntrinsic->setDest(NewDestGEP);
    adjustArgAttributes(DeadIntrinsic, 0, ToRemoveSize);
  }

  // Update attached dbg_assign markers. Assume 8-bit bytes.
  shortenAssignment(DeadI, OrigDest, DeadStart * 8, DeadSize * 8, NewSize * 8,
                    IsOverwriteEnd);

  // Finally update start and size of the dead access.
  if (!IsOverwriteEnd)
    DeadStart += ToRemoveSize;
  DeadSize = NewSize;
  return true;
}
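// A small before/after sketch of the shortening above (IsOverwriteEnd case,
// names hypothetical): given
//
//   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 32, i1 false)
//   %q = getelementptr inbounds i8, ptr %p, i64 24
//   store i64 1, ptr %q   ; killing store over bytes [24, 32)
//
// DSE shrinks the memset length from 32 to 24 instead of deleting it. When
// shortening from the beginning, the destination pointer is additionally
// advanced past the removed prefix via the inbounds GEP created above.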
static bool tryToShortenEnd(Instruction *DeadI,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t KillingStart = OII->second;
  uint64_t KillingSize = OII->first - KillingStart;

  assert(OII->first - KillingStart >= 0 && "Size expected to be positive");

  if (KillingStart > DeadStart &&
      // Note: "KillingStart - DeadStart" is known to be positive due to the
      // preceding check.
      (uint64_t)(KillingStart - DeadStart) < DeadSize &&
      // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
      // be non-negative due to the preceding checks.
      KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
static bool tryToShortenBegin(Instruction *DeadI,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t KillingStart = OII->second;
  uint64_t KillingSize = OII->first - KillingStart;

  assert(OII->first - KillingStart >= 0 && "Size expected to be positive");

  if (KillingStart <= DeadStart &&
      KillingSize > (uint64_t)(DeadStart - KillingStart)) {
    assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
static Constant *
tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
                                   int64_t KillingOffset, int64_t DeadOffset,
                                   const DataLayout &DL, BatchAAResults &AA,
                                   DominatorTree *DT) {
  // ... (both stores must write constant integers, the dead store must fully
  // contain the killing store, and the memory in between must be unmodified;
  // DeadValue/KillingValue are the APInt constants, KillingBits their width)
    // Merge the two stores: shift the killing value into position and splice
    // it into the dead value, honoring endianness.
    unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
    unsigned LShiftAmount =
        DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
                         : BitOffsetDiff;
    APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
                                   LShiftAmount + KillingBits);
    APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
    LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Dead: " << *DeadI
                      << "\n  Killing: " << *KillingI
                      << "\n  Merged Value: " << Merged << '\n');
    return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged);
  // ...
  return nullptr;
}
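// Worked example of the merge above on a little-endian target: a dead store
// of i32 0xAABBCCDD at offset 0 and a killing store of i8 0x11 at offset 1
// give BitOffsetDiff = 8, LShiftAmount = 8 and KillingBits = 8, so Mask
// selects bits [8, 16), and
//   Merged = (0xAABBCCDD & ~0x0000FF00) | (0x11 << 8) = 0xAABB11DD.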
static bool isNoopIntrinsic(Instruction *I) {
  if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::assume:
      return true;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
    default:
      return false;
    }
  }
  return false;
}
// Check if we can ignore \p D for DSE.
bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
  Instruction *DI = D->getMemoryInst();
  // Calls that only access inaccessible memory cannot read or write any
  // memory locations we consider for elimination.
  if (auto *CB = dyn_cast<CallBase>(DI))
    if (CB->onlyAccessesInaccessibleMemory())
      return true;

  // We can eliminate stores to locations not visible to the caller across
  // throwing instructions.
  if (DI->mayThrow() && !DefVisibleToCaller)
    return true;

  // Fences only constrain the ordering of already visible stores; skipping
  // over one does not change whether a store is dead.
  if (isa<FenceInst>(DI))
    return true;

  // Skip intrinsics that do not really read or modify memory.
  if (isNoopIntrinsic(DI))
    return true;

  return false;
}
// A memory location wrapper that represents a MemoryLocation, `MemLoc`,
// defined by `MemDef`.
struct MemoryLocationWrapper {
  MemoryLocationWrapper(MemoryLocation MemLoc, MemoryDef *MemDef,
                        bool DefByInitializesAttr)
      : MemLoc(MemLoc), MemDef(MemDef),
        DefByInitializesAttr(DefByInitializesAttr) {
    assert(MemLoc.Ptr && "MemLoc should be not null");
    UnderlyingObject = getUnderlyingObject(MemLoc.Ptr);
    DefInst = MemDef->getMemoryInst();
  }

  MemoryLocation MemLoc;
  const Value *UnderlyingObject;
  MemoryDef *MemDef;
  Instruction *DefInst;
  bool DefByInitializesAttr = false;
};

// A MemoryDef wrapper that represents a MemoryDef and the MemoryLocation(s)
// defined by it.
struct MemoryDefWrapper {
  MemoryDefWrapper(MemoryDef *MemDef,
                   ArrayRef<std::pair<MemoryLocation, bool>> MemLocations) {
    DefInst = MemDef->getMemoryInst();
    for (auto &[MemLoc, DefByInitializesAttr] : MemLocations)
      DefinedLocations.push_back(
          MemoryLocationWrapper(MemLoc, MemDef, DefByInitializesAttr));
  }
  Instruction *DefInst;
  SmallVector<MemoryLocationWrapper, 1> DefinedLocations;
};
struct ArgumentInitInfo {
  unsigned Idx;
  bool IsDeadOrInvisibleOnUnwind;
  ConstantRangeList Inits;
};

// Return the intersected range list of the initializes attributes of \p Args.
// \p Args are call arguments that alias each other. If any argument is not
// dead or invisible on unwind and \p CallHasNoUnwindAttr is false, return an
// empty list.
static ConstantRangeList
getIntersectedInitRangeList(ArrayRef<ArgumentInitInfo> Args,
                            bool CallHasNoUnwindAttr) {
  if (Args.empty())
    return {};

  for (const auto &Arg : Args) {
    if (!CallHasNoUnwindAttr && !Arg.IsDeadOrInvisibleOnUnwind)
      return {};
    if (Arg.Inits.empty())
      return {};
  }

  ConstantRangeList IntersectedIntervals = Args.front().Inits;
  for (auto &Arg : Args.drop_front())
    IntersectedIntervals = IntersectedIntervals.intersectWith(Arg.Inits);

  return IntersectedIntervals;
}
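// For illustration: if two must-aliasing call arguments carry
// initializes((0, 16)) and initializes((8, 24)) respectively, the
// intersection computed above is [8, 16). Only bytes that every aliasing
// initializer range is guaranteed to write may be used to kill earlier
// stores.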
struct DSEState {
  Function &F;
  AliasAnalysis &AA;
  EarliestEscapeAnalysis EA;
  // The single BatchAA instance used to cache AA queries across the run.
  BatchAAResults BatchAA;
  MemorySSA &MSSA;
  DominatorTree &DT;
  PostDominatorTree &PDT;
  const TargetLibraryInfo &TLI;
  const DataLayout &DL;
  const LoopInfo &LI;

  // Whether the function contains any irreducible control flow, useful for
  // being accurately able to detect loops.
  bool ContainsIrreducibleLoops;

  // All MemoryDefs that potentially could kill other MemDefs.
  SmallVector<MemoryDef *, 64> MemDefs;
  // Any MemoryDefs that should be skipped because they are already deleted.
  SmallPtrSet<MemoryAccess *, 4> SkipStores;
  // Keep track of whether a given object is captured before return.
  DenseMap<const Value *, bool> CapturedBeforeReturn;
  // Keep track of objects that are invisible to the caller after return.
  DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
  // Blocks with throwing instructions not modeled in MemorySSA.
  SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
  // Post-order numbers for each basic block. Used to figure out if memory
  // accesses are executed before another access.
  DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
  // Instructions (partly) overlapping with killing MemoryDefs per block.
  MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
  // Whether any post-dominator tree root is terminated by UnreachableInst;
  // such roots pessimize post-dominance queries.
  bool AnyUnreachableExit;
  // Whether we should iterate on removing dead stores at the end of the
  // function, because removing a store made a previously captured pointer
  // no longer captured.
  bool ShouldIterateEndOfFunctionDSE;
  // Dead instructions to be removed at the end of DSE.
  SmallVector<Instruction *> ToRemove;

  // The state contains self-references; make sure it is not copied.
  DSEState(const DSEState &) = delete;
  DSEState &operator=(const DSEState &) = delete;

  DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
           PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
           const LoopInfo &LI)
      : F(F), AA(AA), EA(DT, &LI), BatchAA(AA, &EA), MSSA(MSSA), DT(DT),
        PDT(PDT), TLI(TLI), DL(F.getDataLayout()), LI(LI) {
    // Collect blocks with throwing instructions not modeled in MemorySSA,
    // and candidate MemoryDefs.
    unsigned PO = 0;
    for (BasicBlock *BB : post_order(&F)) {
      PostOrderNumbers[BB] = PO++;
      for (Instruction &I : *BB) {
        MemoryAccess *MA = MSSA.getMemoryAccess(&I);
        if (I.mayThrow() && !MA)
          ThrowingBlocks.insert(I.getParent());

        auto *MD = dyn_cast_or_null<MemoryDef>(MA);
        if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
            (getLocForWrite(&I) || isMemTerminatorInst(&I) ||
             (EnableInitializesImprovement &&
              !getInitializesArgMemLoc(&I).empty())))
          MemDefs.push_back(MD);
      }
    }

    // Treat byval or inalloca arguments the same as allocas: stores to them
    // are dead at the end of the function.
    for (Argument &AI : F.args())
      if (AI.hasPassPointeeByValueCopyAttr() || AI.hasDeadOnReturnAttr())
        InvisibleToCallerAfterRet.insert({&AI, true});

    ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);

    AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
      return isa<UnreachableInst>(E->getTerminator());
    });
  }

  static void pushMemUses(MemoryAccess *Acc,
                          SmallVectorImpl<MemoryAccess *> &WorkList,
                          SmallPtrSetImpl<MemoryAccess *> &Visited) {
    for (Use &U : Acc->uses()) {
      auto *MA = cast<MemoryAccess>(U.getUser());
      if (Visited.insert(MA).second)
        WorkList.push_back(MA);
    }
  }

  LocationSize strengthenLocationSize(const Instruction *I,
                                      LocationSize Size) const {
    if (auto *CB = dyn_cast<CallBase>(I)) {
      LibFunc F;
      if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
          (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
        // memset_chk either writes the amount specified by the 3rd argument
        // or aborts, so its location size can be treated as precise.
        if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
          return LocationSize::precise(Len->getZExtValue());
      }
    }
    return Size;
  }
  /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
  /// KillingI) completely overwrites a store to the 'DeadLoc' location (by
  /// \p DeadI).
  OverwriteResult isOverwrite(const Instruction *KillingI,
                              const Instruction *DeadI,
                              const MemoryLocation &KillingLoc,
                              const MemoryLocation &DeadLoc,
                              int64_t &KillingOff, int64_t &DeadOff) {
    // AliasAnalysis does not always account for loops. Limit overwrite checks
    // to dependencies that are guaranteed loop independent.
    if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
      return OW_Unknown;

    LocationSize KillingLocSize =
        strengthenLocationSize(KillingI, KillingLoc.Size);
    const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
    const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
    const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
    const Value *KillingUndObj = getUnderlyingObject(KillingPtr);

    // Check whether the killing store overwrites the whole object, in which
    // case the size and offset of the dead store do not matter.
    if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
        isIdentifiedObject(KillingUndObj)) {
      std::optional<TypeSize> KillingUndObjSize =
          getPointerSize(KillingUndObj, DL, TLI, &F);
      if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.getValue())
        return OW_Complete;
    }

    // If no precise sizes are known, compare the IR values for the number of
    // bytes written instead.
    if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
      const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
      const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
      if (KillingMemI && DeadMemI) {
        const Value *KillingV = KillingMemI->getLength();
        const Value *DeadV = DeadMemI->getLength();
        if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
          return OW_Complete;
      }
      // Masked stores have imprecise locations, but we can reason about them
      // to some extent.
      return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
    }

    const TypeSize KillingSize = KillingLocSize.getValue();
    const TypeSize DeadSize = DeadLoc.Size.getValue();
    // Bail on size comparisons involving scalable vectors for now.
    const bool AnyScalable =
        DeadSize.isScalable() || KillingLocSize.isScalable();
    if (AnyScalable)
      return OW_Unknown;

    AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);

    // If the start pointers must-alias, just compare sizes.
    if (AAR == AliasResult::MustAlias) {
      if (KillingSize >= DeadSize)
        return OW_Complete;
    }

    // If we hit a partial alias we may have a full overwrite.
    if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
      int32_t Off = AAR.getOffset();
      if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
        return OW_Complete;
    }

    // If the pointers do not resolve to the same object, we cannot analyze
    // them at all.
    if (DeadUndObj != KillingUndObj) {
      // Non-aliasing stores to different objects don't overlap.
      if (AAR == AliasResult::NoAlias)
        return OW_None;
      return OW_Unknown;
    }

    // Try to decompose both pointers into a "base + constant offset" form. If
    // the base pointers are equal, we can reason about the two stores.
    DeadOff = 0;
    KillingOff = 0;
    const Value *DeadBasePtr =
        GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
    const Value *KillingBasePtr =
        GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
    if (DeadBasePtr != KillingBasePtr)
      return OW_Unknown;

    // Check if the dead access starts "not before" the killing one.
    if (DeadOff >= KillingOff) {
      // If the dead access also ends "not after" the killing access, it is
      // completely overwritten.
      if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
        return OW_Complete;
      // If the dead access starts "before" the end of the killing access,
      // the accesses merely overlap.
      else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
        return OW_MaybePartial;
    }
    // If the killing access starts "before" the end of the dead access, the
    // accesses overlap.
    else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
      return OW_MaybePartial;
    }

    // Can reach here only if accesses are known not to overlap.
    return OW_None;
  }
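  // Offset example for the checks above: with DeadOff = 4, DeadSize = 4,
  // KillingOff = 0 and KillingSize = 8, the dead access [4, 8) lies entirely
  // inside the killing access [0, 8), so (DeadOff - KillingOff) + DeadSize
  // = 8 <= KillingSize and the result is OW_Complete. Shrinking KillingSize
  // to 6 instead yields OW_MaybePartial, because the accesses then merely
  // overlap.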
  /// Returns true if \p V is invisible to the caller after the function
  /// returns, i.e. stores to it are dead once the function exits.
  bool isInvisibleToCallerAfterRet(const Value *V) {
    if (isa<AllocaInst>(V))
      return true;

    auto I = InvisibleToCallerAfterRet.insert({V, false});
    if (I.second && isInvisibleToCallerOnUnwind(V) && isNoAliasCall(V))
      I.first->second = capturesNothing(PointerMayBeCaptured(
          V, /*ReturnCaptures=*/true, CaptureComponents::Provenance));
    return I.first->second;
  }

  /// Returns true if \p V is invisible to the caller on unwind paths.
  bool isInvisibleToCallerOnUnwind(const Value *V) {
    bool RequiresNoCaptureBeforeUnwind;
    if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
      return false;
    if (!RequiresNoCaptureBeforeUnwind)
      return true;

    auto I = CapturedBeforeReturn.insert({V, true});
    if (I.second)
      // NOTE: This could be made more precise by checking only for captures
      // before the killing MemoryDef; the coarser whole-function query is
      // kept for simplicity and does not cause miscompiles.
      I.first->second = capturesAnything(PointerMayBeCaptured(
          V, /*ReturnCaptures=*/false, CaptureComponents::Provenance));
    return !I.first->second;
  }
  std::optional<MemoryLocation> getLocForWrite(Instruction *I) const {
    if (!I->mayWriteToMemory())
      return std::nullopt;

    if (auto *CB = dyn_cast<CallBase>(I))
      return MemoryLocation::getForDest(CB, TLI);

    return MemoryLocation::getOrNone(I);
  }

  // Returns the locations written by \p I as <MemoryLocation, bool> pairs,
  // where the bool flags locations that are only defined via an "initializes"
  // attribute.
  SmallVector<std::pair<MemoryLocation, bool>, 1>
  getLocForInst(Instruction *I, bool ConsiderInitializesAttr) {
    SmallVector<std::pair<MemoryLocation, bool>, 1> Locations;
    if (isMemTerminatorInst(I)) {
      if (auto Loc = getLocForTerminator(I))
        Locations.push_back(std::make_pair(Loc->first, false));
      return Locations;
    }

    if (auto Loc = getLocForWrite(I))
      Locations.push_back(std::make_pair(*Loc, false));

    if (ConsiderInitializesAttr) {
      for (auto &MemLoc : getInitializesArgMemLoc(I)) {
        Locations.push_back(std::make_pair(MemLoc, true));
      }
    }
    return Locations;
  }

  /// Returns true if \p I is a write that can be removed when its written
  /// value is dead.
  bool isRemovable(Instruction *I) {
    assert(getLocForWrite(I) && "Must have analyzable write");

    // A store is removable if it is unordered.
    if (auto *SI = dyn_cast<StoreInst>(I))
      return SI->isUnordered();

    if (auto *CB = dyn_cast<CallBase>(I)) {
      // Mem intrinsics are removable if they are non-volatile.
      if (auto *MI = dyn_cast<MemIntrinsic>(CB))
        return !MI->isVolatile();

      // Never remove lifetime intrinsics, e.g. because they may be followed
      // by a free.
      if (CB->isLifetimeStartOrEnd())
        return false;

      return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
             !CB->isTerminator();
    }

    return false;
  }
  /// Returns true if \p UseInst completely overwrites \p DefLoc (stored by
  /// \p DefInst).
  bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
                           Instruction *UseInst) {
    // It is possible for a MemoryDef to not actually write memory; e.g. a
    // volatile load is modeled as a MemoryDef.
    if (!UseInst->mayWriteToMemory())
      return false;

    if (auto *CB = dyn_cast<CallBase>(UseInst))
      if (CB->onlyAccessesInaccessibleMemory())
        return false;

    int64_t InstWriteOffset, DepWriteOffset;
    if (auto CC = getLocForWrite(UseInst))
      return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
                         DepWriteOffset) == OW_Complete;
    return false;
  }
1312 << *
Def->getMemoryInst()
1313 <<
") is at the end the function \n");
1317 pushMemUses(Def, WorkList, Visited);
1318 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1325 if (isa<MemoryPhi>(UseAccess)) {
1329 if (!isGuaranteedLoopInvariant(DefLoc.
Ptr))
1332 pushMemUses(cast<MemoryPhi>(UseAccess), WorkList, Visited);
1337 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1338 if (isReadClobber(DefLoc, UseInst)) {
1339 LLVM_DEBUG(
dbgs() <<
" ... hit read clobber " << *UseInst <<
".\n");
1343 if (
MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1344 pushMemUses(UseDef, WorkList, Visited);
  /// If \p I is a memory terminator like llvm.lifetime.end or a free-like
  /// call, return the terminated MemoryLocation and a flag indicating whether
  /// \p I frees the underlying object.
  std::optional<std::pair<MemoryLocation, bool>>
  getLocForTerminator(Instruction *I) const {
    // ... (handle llvm.lifetime.end, returning {location, false})
    if (auto *CB = dyn_cast<CallBase>(I)) {
      if (Value *FreedOp = getFreedOperand(CB, &TLI))
        return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
    }
    return std::nullopt;
  }

  /// Returns true if \p I is a memory terminator instruction like
  /// llvm.lifetime.end or free.
  bool isMemTerminatorInst(Instruction *I) const {
    auto *CB = dyn_cast<CallBase>(I);
    return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
                  getFreedOperand(CB, &TLI) != nullptr);
  }

  /// Returns true if \p MaybeTerm is a memory terminator for \p Loc, i.e. it
  /// terminates the lifetime of the whole object \p AccessI writes to.
  bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
                       Instruction *MaybeTerm) {
    std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
        getLocForTerminator(MaybeTerm);
    if (!MaybeTermLoc)
      return false;

    // If the terminator is a free-like call, all accesses to the underlying
    // object can be considered terminated.
    auto TermLoc = MaybeTermLoc->first;
    if (MaybeTermLoc->second) {
      const Value *LocUO = getUnderlyingObject(Loc.Ptr);
      return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
    }
    int64_t InstWriteOffset = 0;
    int64_t DepWriteOffset = 0;
    return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
                       DepWriteOffset) == OW_Complete;
  }
  // Returns true if \p UseInst may read from \p DefLoc.
  bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
    if (isNoopIntrinsic(UseInst))
      return false;

    // Monotonic or weaker atomic stores can be re-ordered and do not need to
    // be treated as read clobbers.
    if (auto SI = dyn_cast<StoreInst>(UseInst))
      return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);

    if (!UseInst->mayReadFromMemory())
      return false;

    if (auto *CB = dyn_cast<CallBase>(UseInst))
      if (CB->onlyAccessesInaccessibleMemory())
        return false;

    return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
  }
  /// Returns true if a dependency between \p Current and \p KillingDef is
  /// guaranteed to be loop invariant for the loops they are in: either they
  /// are in the same block or loop level, or \p CurrentLoc only references a
  /// single MemoryLocation during execution of the function.
  bool isGuaranteedLoopIndependent(const Instruction *Current,
                                   const Instruction *KillingDef,
                                   const MemoryLocation &CurrentLoc) {
    // If the dependency is within the same block or loop level (being careful
    // of irreducible loops), AA will return a valid result for the memory
    // dependency.
    if (Current->getParent() == KillingDef->getParent())
      return true;
    const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
    if (!ContainsIrreducibleLoops && CurrentLI &&
        CurrentLI == LI.getLoopFor(KillingDef->getParent()))
      return true;
    // Otherwise check that the memory location is invariant to any loops.
    return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
  }

  /// Returns true if \p Ptr is guaranteed to be loop invariant for any
  /// possible loop, i.e. it only references a single MemoryLocation during
  /// execution of the containing function.
  bool isGuaranteedLoopInvariant(const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
      if (GEP->hasAllConstantIndices())
        Ptr = GEP->getPointerOperand()->stripPointerCasts();

    if (auto *I = dyn_cast<Instruction>(Ptr)) {
      return I->getParent()->isEntryBlock() ||
             (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent()));
    }
    return true;
  }
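  // Example of why the loop checks above matter: in
  //
  //   for (i = 0; i < n; i++) p[i] = 0;   // A
  //   p[0] = 1;                           // B
  //
  // B only overwrites A's store when i == 0, so a dependency between A and B
  // that crosses a loop level must not be treated as a kill unless the
  // written pointer provably names the same location on every iteration.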
  /// Walk upwards from \p StartAccess, looking for a MemoryDef that is
  /// completely overwritten by \p KillingDef / \p KillingLoc. Returns
  /// std::nullopt when a limit is hit or a barrier makes elimination unsafe,
  /// a MemoryPhi for the caller to traverse, or a candidate dead access.
  std::optional<MemoryAccess *>
  getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
                  const MemoryLocation &KillingLoc, const Value *KillingUndObj,
                  unsigned &ScanLimit, unsigned &WalkerStepLimit,
                  bool IsMemTerm, unsigned &PartialLimit,
                  bool IsInitializesAttrMemLoc) {
    if (ScanLimit == 0 || WalkerStepLimit == 0) {
      LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
      return std::nullopt;
    }

    MemoryAccess *Current = StartAccess;
    Instruction *KillingI = KillingDef->getMemoryInst();
    LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");

    // Only optimize the defining access of KillingDef when directly starting
    // at its defining access.
    bool CanOptimize =
        OptimizeMemorySSA && KillingDef->getDefiningAccess() == StartAccess;

    // Find the next clobbering Mod access for KillingLoc, starting at
    // StartAccess.
    std::optional<MemoryLocation> CurrentLoc;
    for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
      LLVM_DEBUG({
        dbgs() << "   visiting " << *Current;
        if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
          dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
                 << ")";
        dbgs() << "\n";
      });

      // Reached TOP.
      if (MSSA.isLiveOnEntryDef(Current)) {
        LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
        return std::nullopt;
      }

      // Cost of a step. Accesses in the same block are more likely to be
      // valid candidates for elimination, hence consider them cheaper.
      unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
                              ? MemorySSASameBBStepCost
                              : MemorySSAOtherBBStepCost;
      if (WalkerStepLimit <= StepCost) {
        LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
        return std::nullopt;
      }
      WalkerStepLimit -= StepCost;

      // Return for MemoryPhis. They cannot be eliminated directly and the
      // caller is responsible for traversing them.
      if (isa<MemoryPhi>(Current)) {
        LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
        return Current;
      }

      // Check if CurrentDef is a valid candidate to be eliminated by
      // KillingDef. If it is not, check the next candidate.
      MemoryDef *CurrentDef = cast<MemoryDef>(Current);
      Instruction *CurrentI = CurrentDef->getMemoryInst();

      if (canSkipDef(CurrentDef,
                     !isInvisibleToCallerOnUnwind(KillingUndObj))) {
        CanOptimize = false;
        continue;
      }

      // Before trying to remove anything, check for extra throwing
      // instructions that block DSE.
      if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
        LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
        return std::nullopt;
      }

      // Check for barriers to applying DSE.
      if (isDSEBarrier(KillingUndObj, CurrentI)) {
        LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
        return std::nullopt;
      }

      // If Current is a read clobber, bail out: the path is not profitable.
      // Skip this check for intrinsic calls, because the code below knows how
      // to handle memterminator intrinsics.
      if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
        return std::nullopt;

      // Quick check if there are direct uses that are read-clobbers.
      if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
            if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
              return !MSSA.dominates(StartAccess, UseOrDef) &&
                     isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
            return false;
          })) {
        LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
        return std::nullopt;
      }

      // If Current does not have an analyzable write location or is not
      // removable, skip it.
      CurrentLoc = getLocForWrite(CurrentI);
      if (!CurrentLoc || !isRemovable(CurrentI)) {
        CanOptimize = false;
        continue;
      }

      // AliasAnalysis does not account for loops. Limit elimination to
      // candidates that always store to the same memory location and are not
      // located in different loops.
      if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
        LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
        CanOptimize = false;
        continue;
      }

      if (IsMemTerm) {
        // If the killing def is a memory terminator (e.g. lifetime.end),
        // check the next candidate if Current does not write the same
        // underlying object as the terminator.
        if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
          CanOptimize = false;
          continue;
        }
      } else {
        int64_t KillingOffset = 0;
        int64_t DeadOffset = 0;
        auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
                              KillingOffset, DeadOffset);
        if (CanOptimize) {
          // CurrentDef is the earliest write clobber of KillingDef. Use it as
          // the optimized access, unless it is already the defining access.
          if (CurrentDef != KillingDef->getDefiningAccess() &&
              (OR == OW_Complete || OR == OW_MaybePartial))
            KillingDef->setOptimized(CurrentDef);

          // Once a may-aliasing def is encountered, do not set an optimized
          // access.
          if (OR != OW_None)
            CanOptimize = false;
        }

        // If Current does not write to the same object as KillingDef, check
        // the next candidate.
        if (OR == OW_Unknown || OR == OW_None)
          continue;
        else if (OR == OW_MaybePartial) {
          // If KillingDef only partially overwrites Current, check the next
          // candidate once the partial step limit is exceeded: partial
          // overwrites are less likely to be removable in the end.
          if (PartialLimit <= 1) {
            WalkerStepLimit -= 1;
            LLVM_DEBUG(dbgs() << "   ... reached partial limit ... continue "
                                 "with next access\n");
            continue;
          }
          PartialLimit -= 1;
        }
      }
      break;
    }
    // Current is a candidate. Check whether it may be read along any path to
    // an exit, and whether all such paths pass through a killing block.
    MemoryAccess *MaybeDeadAccess = Current;
    MemoryLocation MaybeDeadLoc = *CurrentLoc;
    Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
    LLVM_DEBUG(dbgs() << "  Checking for reads of " << *MaybeDeadAccess << " ("
                      << *MaybeDeadI << ")\n");

    // Defs that completely overwrite MaybeDeadAccess on some path to an exit.
    SmallPtrSet<Instruction *, 16> KillingDefs;
    SmallVector<MemoryAccess *, 32> WorkList;
    SmallPtrSet<MemoryAccess *, 32> Visited;
    pushMemUses(MaybeDeadAccess, WorkList, Visited);

    // Check if any use of MaybeDeadAccess may read it.
    for (unsigned I = 0; I < WorkList.size(); I++) {
      MemoryAccess *UseAccess = WorkList[I];

      // Bail out if the number of accesses to check exceeds the scan limit.
      if (ScanLimit < (WorkList.size() - I)) {
        LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
        return std::nullopt;
      }
      --ScanLimit;
      NumDomMemDefChecks++;

      if (isa<MemoryPhi>(UseAccess)) {
        if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
              return DT.properlyDominates(KI->getParent(),
                                          UseAccess->getBlock());
            })) {
          LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
          continue;
        }
        pushMemUses(UseAccess, WorkList, Visited);
        continue;
      }

      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
      if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
            return DT.dominates(KI, UseInst);
          })) {
        LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
        continue;
      }

      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
      if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
        LLVM_DEBUG(
            dbgs()
            << " ... skipping, memterminator invalidates following accesses\n");
        continue;
      }

      if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
        pushMemUses(UseAccess, WorkList, Visited);
        continue;
      }

      if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
        LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
        return std::nullopt;
      }

      // Uses that may read the original MemoryDef mean we cannot eliminate
      // it, so stop the walk. If KillingDef is a call with the "initializes"
      // attribute, the reads in the callee are dominated by the
      // initialization, so they are safe.
      bool IsKillingDefFromInitAttr = false;
      if (IsInitializesAttrMemLoc) {
        if (KillingI == UseInst &&
            KillingUndObj == getUnderlyingObject(MaybeDeadLoc.Ptr))
          IsKillingDefFromInitAttr = true;
      }

      if (isReadClobber(MaybeDeadLoc, UseInst) && !IsKillingDefFromInitAttr) {
        LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
        return std::nullopt;
      }

      // If this worklist walks back to the original memory access (and the
      // pointer is not guaranteed loop invariant), then we cannot assume that
      // a store kills itself.
      if (MaybeDeadAccess == UseAccess &&
          !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
        LLVM_DEBUG(dbgs() << "    ... found not loop invariant self access\n");
        return std::nullopt;
      }
      // Otherwise, for KillingDef and MaybeDeadAccess we only had to check
      // whether it reads the memory location.
      if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess)
        continue;

      // Check all uses for MemoryDefs, except for defs completely overwriting
      // the original location; otherwise we have to follow uses of *all*
      // discovered MemoryDefs, including non-aliasing ones.
      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
        if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
          BasicBlock *MaybeKillingBlock = UseInst->getParent();
          if (PostOrderNumbers.find(MaybeKillingBlock)->second <
              PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
            if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
              LLVM_DEBUG(dbgs()
                         << "    ... found killing def " << *UseInst << "\n");
              KillingDefs.insert(UseInst);
            }
          } else {
            LLVM_DEBUG(dbgs()
                       << "    ... found preceeding def " << *UseInst << "\n");
            return std::nullopt;
          }
        } else
          pushMemUses(UseDef, WorkList, Visited);
      }
    }

    // For accesses to locations visible after the function returns, make sure
    // the location is dead (=overwritten) along all paths from
    // MaybeDeadAccess to the exit.
    if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
      SmallPtrSet<BasicBlock *, 16> KillingBlocks;
      for (Instruction *KD : KillingDefs)
        KillingBlocks.insert(KD->getParent());
      assert(!KillingBlocks.empty() &&
             "Expected at least a single killing block");

      // Find the common post-dominator of all killing blocks.
      BasicBlock *CommonPred = *KillingBlocks.begin();
      for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
        if (!CommonPred)
          break;
        CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
      }

      // If the common post-dominator does not post-dominate MaybeDeadAccess,
      // some path from MaybeDeadAccess to an exit misses every killing block.
      if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
        if (!AnyUnreachableExit)
          return std::nullopt;

        // Fall back to a CFG scan starting from all non-unreachable roots.
        CommonPred = nullptr;
      }

      // If CommonPred itself is in the set of killing blocks, we're done.
      if (KillingBlocks.count(CommonPred))
        return {MaybeDeadAccess};

      SetVector<BasicBlock *> WorkList;
      // If CommonPred is null, there are multiple exits from the function;
      // all of them have to be added to the worklist.
      if (CommonPred)
        WorkList.insert(CommonPred);
      else
        for (BasicBlock *R : PDT.roots())
          if (!isa<UnreachableInst>(R->getTerminator()))
            WorkList.insert(R);

      // Check if all paths starting from an exit node go through one of the
      // killing blocks before reaching MaybeDeadAccess.
      for (unsigned I = 0; I < WorkList.size(); I++) {
        BasicBlock *Current = WorkList[I];
        if (KillingBlocks.count(Current))
          continue;
        if (Current == MaybeDeadAccess->getBlock())
          return std::nullopt;

        // MaybeDeadAccess is reachable from the entry, so we don't have to
        // explore unreachable blocks further.
        if (!DT.isReachableFromEntry(Current))
          continue;

        for (BasicBlock *Pred : predecessors(Current))
          WorkList.insert(Pred);

        if (WorkList.size() >= MemorySSAPathCheckLimit)
          return std::nullopt;
      }
    }

    // No aliasing MemoryUses of MaybeDeadAccess found, so MaybeDeadAccess is
    // potentially dead.
    return {MaybeDeadAccess};
  }
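  // Sketch of the path check above: if the dead def is in block D and the
  // killing stores are in blocks K1 and K2, the scan starts at the common
  // post-dominator of {K1, K2} (or at every non-unreachable exit) and marches
  // backwards through predecessors. Reaching D again means some path from D
  // to an exit bypasses every killing block, so the candidate is rejected.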
  // Delete dead memory defs and recursively add their operands to ToRemove if
  // they became dead.
  void
  deleteDeadInstruction(Instruction *SI,
                        SmallPtrSetImpl<MemoryAccess *> *Deleted = nullptr) {
    MemorySSAUpdater Updater(&MSSA);
    SmallVector<Instruction *, 32> NowDeadInsts;
    NowDeadInsts.push_back(SI);
    --NumFastOther;

    while (!NowDeadInsts.empty()) {
      Instruction *DeadInst = NowDeadInsts.pop_back_val();
      ++NumFastOther;

      // Try to preserve debug information attached to the dead instruction.
      salvageDebugInfo(*DeadInst);
      salvageKnowledge(DeadInst);

      // Remove the instruction from MemorySSA.
      MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst);
      bool IsMemDef = MA && isa<MemoryDef>(MA);
      if (MA) {
        if (IsMemDef) {
          auto *MD = cast<MemoryDef>(MA);
          SkipStores.insert(MD);
          if (Deleted)
            Deleted->insert(MD);
          if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
            if (SI->getValueOperand()->getType()->isPointerTy()) {
              const Value *UO = getUnderlyingObject(SI->getValueOperand());
              if (CapturedBeforeReturn.erase(UO))
                ShouldIterateEndOfFunctionDSE = true;
              InvisibleToCallerAfterRet.erase(UO);
            }
          }
        }

        Updater.removeMemoryAccess(MA);
      }

      auto I = IOLs.find(DeadInst->getParent());
      if (I != IOLs.end())
        I->second.erase(DeadInst);

      // Poison its operands and queue any that became trivially dead.
      for (Use &O : DeadInst->operands())
        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
          O.set(PoisonValue::get(O->getType()));
          if (isInstructionTriviallyDead(OpI, &TLI))
            NowDeadInsts.push_back(OpI);
        }

      EA.removeInstruction(DeadInst);
      // Erase MemoryDefs immediately; defer other instructions to ToRemove,
      // which is drained after the main traversal.
      if (IsMemDef)
        DeadInst->eraseFromParent();
      else
        ToRemove.push_back(DeadInst);
    }
  }
  // Check for any extra throws between \p KillingI and \p DeadI that block
  // DSE. This only checks extra maythrows (those that are not MemoryDefs);
  // MemoryDefs that may throw are handled during the walk from one def to the
  // next.
  bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
                       const Value *KillingUndObj) {
    // First see if we can ignore it by using the fact that KillingUndObj is
    // an alloca-like object that is not visible to the caller during
    // execution of the function.
    if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
      return false;

    if (KillingI->getParent() == DeadI->getParent())
      return ThrowingBlocks.count(KillingI->getParent());
    return !ThrowingBlocks.empty();
  }

  // Check if \p DeadI acts as a DSE barrier for \p KillingI. Barriers are:
  //  * A memory instruction that may throw when \p KillingI accesses a
  //    non-stack object.
  //  * Atomic loads/stores stronger than monotonic.
  bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
    // If DeadI may throw it acts as a barrier, unless we are writing to an
    // alloca-like object that does not escape.
    if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
      return true;

    // If DeadI is an atomic access stronger than monotonic, do not try to
    // eliminate or reorder it.
    if (DeadI->isAtomic()) {
      if (auto *LI = dyn_cast<LoadInst>(DeadI))
        return isStrongerThanMonotonic(LI->getOrdering());
      if (auto *SI = dyn_cast<StoreInst>(DeadI))
        return isStrongerThanMonotonic(SI->getOrdering());
      if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
        return isStrongerThanMonotonic(ARMW->getOrdering());
      if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
        return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering());
      llvm_unreachable("other instructions should be skipped in MemorySSA");
    }
    return false;
  }
  /// Eliminate writes to objects that are not visible in the caller and are
  /// not accessed before returning from the function.
  bool eliminateDeadWritesAtEndOfFunction() {
    bool MadeChange = false;
    LLVM_DEBUG(
        dbgs()
        << "Trying to eliminate MemoryDefs at the end of the function\n");
    do {
      ShouldIterateEndOfFunctionDSE = false;
      for (MemoryDef *Def : llvm::reverse(MemDefs)) {
        if (SkipStores.contains(Def))
          continue;

        Instruction *DefI = Def->getMemoryInst();
        auto DefLoc = getLocForWrite(DefI);
        if (!DefLoc || !isRemovable(DefI)) {
          LLVM_DEBUG(dbgs() << "  ... could not get location for write or "
                               "instruction not removable.\n");
          continue;
        }

        // NOTE: Currently eliminating writes at the end of a function is
        // limited to MemoryDefs with a single underlying object, to save
        // compile-time.
        const Value *UO = getUnderlyingObject(DefLoc->Ptr);
        if (!isInvisibleToCallerAfterRet(UO))
          continue;

        if (isWriteAtEndOfFunction(Def, *DefLoc)) {
          LLVM_DEBUG(dbgs() << "  ... MemoryDef is not accessed until the end "
                               "of the function\n");
          deleteDeadInstruction(DefI);
          ++NumFastStores;
          MadeChange = true;
        }
      }
    } while (ShouldIterateEndOfFunctionDSE);
    return MadeChange;
  }
  /// If we have a zero-initializing memset following a call to malloc, try
  /// folding the pair into a single call to calloc (or into a zeroed
  /// allocation variant named by the "alloc-variant-zeroed" attribute).
  bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
    Instruction *DefI = Def->getMemoryInst();
    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
    if (!MemSet)
      // TODO: Could handle zero store to small allocation as well.
      return false;
    Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
    if (!StoredConstant || !StoredConstant->isNullValue())
      return false;

    if (!isRemovable(DefI))
      // The memset might be volatile, in which case we must not touch it.
      return false;

    if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
        F.hasFnAttribute(Attribute::SanitizeAddress) ||
        F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
        F.getName() == "calloc")
      return false;
    auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
    if (!Malloc)
      return false;
    auto *InnerCallee = Malloc->getCalledFunction();
    if (!InnerCallee)
      return false;
    LibFunc Func;
    StringRef ZeroedVariantName;
    if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
        Func != LibFunc_malloc) {
      // Accept non-malloc allocators that declare a zeroed variant.
      Attribute Attr = Malloc->getFnAttr("alloc-variant-zeroed");
      if (!Attr.isValid())
        return false;
      ZeroedVariantName = Attr.getValueAsString();
      if (ZeroedVariantName.empty())
        return false;
    }

    auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
      // Check for a br(icmp ptr, null), truebb, falsebb pattern at the end of
      // the malloc block; the memset must sit on the non-null path.
      auto *MallocBB = Malloc->getParent(),
           *MemsetBB = Memset->getParent();
      if (MallocBB == MemsetBB)
        return true;
      auto *Ptr = Memset->getArgOperand(0);
      auto *TI = MallocBB->getTerminator();
      BasicBlock *TrueBB, *FalseBB;
      if (!match(TI, m_Br(m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(Ptr),
                                         m_Zero()),
                          TrueBB, FalseBB)))
        return false;
      if (MemsetBB != FalseBB)
        return false;
      return true;
    };

    if (!shouldCreateCalloc(Malloc, MemSet) || !DT.dominates(Malloc, MemSet) ||
        !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
      return false;
    IRBuilder<> IRB(Malloc);
    assert(Func == LibFunc_malloc || !ZeroedVariantName.empty());
    Value *Calloc = nullptr;
    if (!ZeroedVariantName.empty()) {
      LLVMContext &Ctx = Malloc->getContext();
      AttributeList Attrs = InnerCallee->getAttributes();
      AllocFnKind AllocKind =
          Attrs.getFnAttr(Attribute::AllocKind).getAllocKind() |
          AllocFnKind::Zeroed;
      Attrs =
          Attrs.addFnAttribute(Ctx, Attribute::getWithAllocKind(Ctx, AllocKind))
              .removeFnAttribute(Ctx, "alloc-variant-zeroed");
      FunctionCallee ZeroedVariant = Malloc->getModule()->getOrInsertFunction(
          ZeroedVariantName, InnerCallee->getFunctionType(), Attrs);
      SmallVector<Value *, 3> Args(Malloc->args());
      Calloc = IRB.CreateCall(ZeroedVariant, Args, ZeroedVariantName);
    } else {
      Type *SizeTTy = Malloc->getArgOperand(0)->getType();
      Calloc =
          emitCalloc(ConstantInt::get(SizeTTy, 1), Malloc->getArgOperand(0),
                     IRB, TLI, Malloc->getType()->getPointerAddressSpace());
    }
    if (!Calloc)
      return false;

    MemorySSAUpdater Updater(&MSSA);
    auto *NewAccess =
        Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), nullptr,
                                        MSSA.getMemoryAccess(Malloc));
    auto *NewAccessMD = cast<MemoryDef>(NewAccess);
    Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
    Malloc->replaceAllUsesWith(Calloc);
    deleteDeadInstruction(Malloc);
    return true;
  }
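  // The fold above in IR terms (a minimal sketch):
  //
  //   %p = call ptr @malloc(i64 %n)
  //   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)
  //
  // becomes
  //
  //   %p = call ptr @calloc(i64 1, i64 %n)
  //
  // provided the malloc dominates the memset and nothing writes to the
  // allocation in between.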
  // Check if there is a dominating condition that implies the value being
  // stored to a pointer is already present at that pointer.
  bool dominatingConditionImpliesValue(MemoryDef *Def) {
    auto *StoreI = cast<StoreInst>(Def->getMemoryInst());
    BasicBlock *StoreBB = StoreI->getParent();
    Value *StorePtr = StoreI->getPointerOperand();
    Value *StoreVal = StoreI->getValueOperand();

    DomTreeNode *IDom = DT.getNode(StoreBB)->getIDom();
    if (!IDom)
      return false;

    auto *BI = dyn_cast<BranchInst>(IDom->getBlock()->getTerminator());
    if (!BI || !BI->isConditional())
      return false;

    // If both successors are the same block, we cannot tell which side of
    // the condition the store executes on.
    if (BI->getSuccessor(0) == BI->getSuccessor(1))
      return false;

    Instruction *ICmpL;
    CmpPredicate Pred;
    if (!match(BI->getCondition(),
               m_c_ICmp(Pred,
                        m_CombineAnd(m_Load(m_Specific(StorePtr)),
                                     m_Instruction(ICmpL)),
                        m_Specific(StoreVal))) ||
        !ICmpInst::isEquality(Pred))
      return false;

    // The store block must be reached only on the edge implied by the
    // predicate; otherwise we cannot conclude anything.
    if (Pred == ICmpInst::ICMP_EQ &&
        !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
                      StoreBB))
      return false;

    if (Pred == ICmpInst::ICMP_NE &&
        !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
                      StoreBB))
      return false;

    MemoryAccess *LoadAcc = MSSA.getMemoryAccess(ICmpL);
    MemoryAccess *ClobAcc =
        MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(LoadAcc, BatchAA);

    return MSSA.dominates(ClobAcc, LoadAcc);
  }
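  // The case recognized above, in IR form (a minimal sketch):
  //
  //   %v = load i32, ptr %p
  //   %c = icmp eq i32 %v, 0
  //   br i1 %c, label %then, label %else
  // then:
  //   store i32 0, ptr %p   ; redundant: %p already holds 0 here
  //
  // provided no clobber of %p lies between the load feeding the compare and
  // the store.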
  /// Returns true if \p Def is a no-op store, either because it stores back a
  /// loaded value or stores the allocation's known initial value.
  bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
    Instruction *DefI = Def->getMemoryInst();
    StoreInst *Store = dyn_cast<StoreInst>(DefI);
    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
    Constant *StoredConstant = nullptr;
    if (Store)
      StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
    else if (MemSet)
      StoredConstant = dyn_cast<Constant>(MemSet->getValue());
    else
      return false;

    if (!isRemovable(DefI))
      return false;

    if (StoredConstant) {
      Constant *InitC =
          getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType());
      // If the clobbering access is LiveOnEntry, no instructions between the
      // allocation and this store can modify the memory location.
      if (InitC && InitC == StoredConstant)
        return MSSA.isLiveOnEntryDef(
            MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA));
    }

    if (!Store)
      return false;

    if (dominatingConditionImpliesValue(Def))
      return true;

    if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
      if (LoadI->getPointerOperand() == Store->getOperand(1)) {
        // Get the defining access for the load.
        auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
        // Fast path: the defining accesses are the same.
        if (LoadAccess == Def->getDefiningAccess())
          return true;

        // Look through MemoryPhi accesses: scan all incoming accesses with a
        // worklist and bail as soon as a MemoryDef other than LoadAccess is
        // found.
        SetVector<MemoryAccess *> ToCheck;
        MemoryAccess *Current =
            MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA);
        // The phi walk may reach the store's own def; pretend it has already
        // been checked.
        ToCheck.insert(Def);
        ToCheck.insert(Current);
        // Start at 1 to simulate already having checked Def.
        for (unsigned I = 1; I < ToCheck.size(); ++I) {
          Current = ToCheck[I];
          if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
            // Check all the operands.
            for (auto &Use : PhiAccess->incoming_values())
              ToCheck.insert(cast<MemoryAccess>(&Use));
            continue;
          }

          // A memory def other than the load's definition means an unrelated
          // write sits between the otherwise no-op load/store pair.
          assert(isa<MemoryDef>(Current) &&
                 "Only MemoryDefs should reach here.");
          if (LoadAccess != Current)
            return false;
        }
        return true;
      }
    }

    return false;
  }
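  // The classic no-op pattern handled above:
  //
  //   %v = load i32, ptr %p
  //   store i32 %v, ptr %p   ; no-op: stores the value already present
  //
  // The MemoryPhi walk extends this to diamonds where every incoming path
  // leaves the location defined by the same access the load observed.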
  bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
    bool Changed = false;
    for (auto OI : IOL) {
      Instruction *DeadI = OI.first;
      MemoryLocation Loc = *getLocForWrite(DeadI);
      assert(isRemovable(DeadI) && "Expect only removable instruction");

      const Value *Ptr = Loc.Ptr->stripPointerCasts();
      int64_t DeadStart = 0;
      uint64_t DeadSize = Loc.Size.getValue();
      GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
      OverlapIntervalsTy &IntervalMap = OI.second;
      Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
      if (IntervalMap.empty())
        continue;
      Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
    }
    return Changed;
  }
  /// Eliminates writes to locations where the value being written is already
  /// stored at the same location.
  bool eliminateRedundantStoresOfExistingValues() {
    bool MadeChange = false;
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
                         "already existing value\n");
    for (auto *Def : MemDefs) {
      if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
        continue;

      Instruction *DefInst = Def->getMemoryInst();
      auto MaybeDefLoc = getLocForWrite(DefInst);
      if (!MaybeDefLoc || !isRemovable(DefInst))
        continue;

      MemoryDef *UpperDef;
      // To conserve compile-time, avoid walking to the next clobbering def;
      // just use the optimized access if one was recorded during the earlier
      // traversal.
      if (Def->isOptimized())
        UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
      else
        UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
      if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
        continue;

      Instruction *UpperInst = UpperDef->getMemoryInst();
      auto IsRedundantStore = [&]() {
        if (DefInst->isIdenticalTo(UpperInst))
          return true;
        if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
          if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
            // The MemSetInst must have an analyzable write location.
            auto UpperLoc = getLocForWrite(UpperInst);
            if (!UpperLoc)
              return false;
            int64_t InstWriteOffset = 0;
            int64_t DepWriteOffset = 0;
            auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
                                  InstWriteOffset, DepWriteOffset);
            Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
            return StoredByte && StoredByte == MemSetI->getOperand(1) &&
                   OR == OW_Complete;
          }
        }
        return false;
      };

      if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
        continue;
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *DefInst
                        << '\n');
      deleteDeadInstruction(DefInst);
      NumRedundantStores++;
      MadeChange = true;
    }
    return MadeChange;
  }
  // Returns the MemoryLocations written via the "initializes" attribute of a
  // call; defined out-of-line below.
  SmallVector<MemoryLocation, 1> getInitializesArgMemLoc(const Instruction *I);

  // Try to eliminate dead defs killed by `KillingLocWrapper`. Returns whether
  // anything changed and whether the killing location itself was deleted.
  std::pair<bool, bool>
  eliminateDeadDefs(const MemoryLocationWrapper &KillingLocWrapper);

  // Try to eliminate dead defs killed by any location of `KillingDefWrapper`.
  bool eliminateDeadDefs(const MemoryDefWrapper &KillingDefWrapper);
};
// Return true if \p Arg is based on a function-local object that is not
// captured before the call \p CB.
bool isFuncLocalAndNotCaptured(Value *Arg, const CallBase *CB,
                               EarliestEscapeAnalysis &EA) {
  const Value *UnderlyingObj = getUnderlyingObject(Arg);
  return isIdentifiedFunctionLocal(UnderlyingObj) &&
         capturesNothing(EA.getCapturesBefore(UnderlyingObj, CB, /*OrAt=*/true));
}

SmallVector<MemoryLocation, 1>
DSEState::getInitializesArgMemLoc(const Instruction *I) {
  const CallBase *CB = dyn_cast<CallBase>(I);
  if (!CB)
    return {};

  // Collect aliasing arguments and their "initializes" ranges.
  SmallMapVector<Value *, SmallVector<ArgumentInitInfo, 2>, 2> Arguments;
  for (unsigned Idx = 0, Count = CB->arg_size(); Idx < Count; ++Idx) {
    ConstantRangeList Inits;
    Attribute InitializesAttr = CB->getParamAttr(Idx, Attribute::Initializes);
    if (InitializesAttr.isValid())
      Inits = InitializesAttr.getValueAsConstantRangeList();

    Value *CurArg = CB->getArgOperand(Idx);
    // Discard the initializes info if the argument may alias memory that is
    // visible elsewhere, i.e. it is not function-local and uncaptured.
    if (!Inits.empty() && !isFuncLocalAndNotCaptured(CurArg, CB, EA))
      Inits = ConstantRangeList();

    // Unwind safety: either the call does not throw, the parameter is dead on
    // unwind, or the argument is invisible to the caller on unwind and the
    // call has no unwind edges in this function (i.e. it is a CallInst).
    bool IsDeadOrInvisibleOnUnwind =
        CB->paramHasAttr(Idx, Attribute::DeadOnUnwind) ||
        (isa<CallInst>(CB) && isInvisibleToCallerOnUnwind(CurArg));
    ArgumentInitInfo InitInfo{Idx, IsDeadOrInvisibleOnUnwind, Inits};
    bool FoundAliasing = false;
    for (auto &[Arg, AliasList] : Arguments) {
      auto AAR = BatchAA.alias(MemoryLocation::getBeforeOrAfter(Arg),
                               MemoryLocation::getBeforeOrAfter(CurArg));
      if (AAR == AliasResult::NoAlias) {
        continue;
      } else if (AAR == AliasResult::MustAlias) {
        FoundAliasing = true;
        AliasList.push_back(InitInfo);
      } else {
        // For PartialAlias and MayAlias there may be an unknown offset
        // between the arguments, so insert an empty init range to discard the
        // entire initializes info while intersecting.
        FoundAliasing = true;
        AliasList.push_back(ArgumentInitInfo{Idx, IsDeadOrInvisibleOnUnwind,
                                             ConstantRangeList()});
      }
    }
    if (!FoundAliasing)
      Arguments[CurArg] = {InitInfo};
  }

  SmallVector<MemoryLocation, 1> Locations;
  for (const auto &[_, Args] : Arguments) {
    auto IntersectedRanges =
        getIntersectedInitRangeList(Args, CB->doesNotThrow());
    if (IntersectedRanges.empty())
      continue;

    for (const auto &Arg : Args) {
      for (const auto &Range : IntersectedRanges) {
        int64_t Start = Range.getLower().getSExtValue();
        int64_t End = Range.getUpper().getSExtValue();
        // For now, only handle ranges starting at offset 0.
        if (Start == 0)
          Locations.push_back(
              MemoryLocation(CB->getArgOperand(Arg.Idx),
                             LocationSize::precise(End - Start),
                             CB->getAAMetadata()));
      }
    }
  }
  return Locations;
}
std::pair<bool, bool>
DSEState::eliminateDeadDefs(const MemoryLocationWrapper &KillingLocWrapper) {
  bool Changed = false;
  bool DeletedKillingLoc = false;
  unsigned ScanLimit = MemorySSAScanLimit;
  unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
  unsigned PartialLimit = MemorySSAPartialStoreLimit;

  // Worklist of MemoryAccesses that may be killed by
  // "KillingLocWrapper.MemDef".
  SetVector<MemoryAccess *> ToCheck;
  // Track MemoryAccesses that have been deleted in the loop below, so we can
  // skip them. Don't use SkipStores for this, which may contain reused
  // MemoryAccess addresses.
  SmallPtrSet<MemoryAccess *, 8> Deleted;
  [[maybe_unused]] unsigned OrigNumSkipStores = SkipStores.size();
  ToCheck.insert(KillingLocWrapper.MemDef->getDefiningAccess());

  // Check if MemoryAccesses in the worklist are killed by
  // "KillingLocWrapper.MemDef".
  for (unsigned I = 0; I < ToCheck.size(); I++) {
    MemoryAccess *Current = ToCheck[I];
    if (Deleted.contains(Current))
      continue;
    std::optional<MemoryAccess *> MaybeDeadAccess = getDomMemoryDef(
        KillingLocWrapper.MemDef, Current, KillingLocWrapper.MemLoc,
        KillingLocWrapper.UnderlyingObject, ScanLimit, WalkerStepLimit,
        isMemTerminatorInst(KillingLocWrapper.DefInst), PartialLimit,
        KillingLocWrapper.DefByInitializesAttr);

    if (!MaybeDeadAccess) {
      LLVM_DEBUG(dbgs() << "  finished walk\n");
      continue;
    }
    MemoryAccess *DeadAccess = *MaybeDeadAccess;
    LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
    if (isa<MemoryPhi>(DeadAccess)) {
      LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
      for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
        MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
        BasicBlock *IncomingBlock = IncomingAccess->getBlock();
        BasicBlock *PhiBlock = DeadAccess->getBlock();

        // Only consider incoming MemoryAccesses that come before the
        // MemoryPhi, to avoid discovering candidates that do not strictly
        // dominate our starting def.
        if (PostOrderNumbers[IncomingBlock] > PostOrderNumbers[PhiBlock])
          ToCheck.insert(IncomingAccess);
      }
      continue;
    }
    // Do not apply the initializes attribute to DeadAccess/DeadDef: that
    // would incorrectly treat a call instruction as a redundant store and
    // remove it.
    MemoryDefWrapper DeadDefWrapper(
        cast<MemoryDef>(DeadAccess),
        getLocForInst(cast<MemoryDef>(DeadAccess)->getMemoryInst(),
                      /*ConsiderInitializesAttr=*/false));
    assert(DeadDefWrapper.DefinedLocations.size() == 1);
    MemoryLocationWrapper &DeadLocWrapper =
        DeadDefWrapper.DefinedLocations.front();
    LLVM_DEBUG(dbgs() << " (" << *DeadLocWrapper.DefInst << ")\n");
    ToCheck.insert(DeadLocWrapper.MemDef->getDefiningAccess());
    NumGetDomMemoryDefPassed++;

    if (!DebugCounter::shouldExecute(MemorySSACounter))
      continue;
    if (isMemTerminatorInst(KillingLocWrapper.DefInst)) {
      if (KillingLocWrapper.UnderlyingObject != DeadLocWrapper.UnderlyingObject)
        continue;
      LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                        << *DeadLocWrapper.DefInst << "\n  KILLER: "
                        << *KillingLocWrapper.DefInst << '\n');
      deleteDeadInstruction(DeadLocWrapper.DefInst, &Deleted);
      ++NumFastStores;
      Changed = true;
    } else {
      // Check if DeadI overwrites KillingI.
      int64_t KillingOffset = 0;
      int64_t DeadOffset = 0;
      OverwriteResult OR =
          isOverwrite(KillingLocWrapper.DefInst, DeadLocWrapper.DefInst,
                      KillingLocWrapper.MemLoc, DeadLocWrapper.MemLoc,
                      KillingOffset, DeadOffset);
      if (OR == OW_MaybePartial) {
        auto &IOL = IOLs[DeadLocWrapper.DefInst->getParent()];
        OR = isPartialOverwrite(KillingLocWrapper.MemLoc, DeadLocWrapper.MemLoc,
                                KillingOffset, DeadOffset,
                                DeadLocWrapper.DefInst, IOL);
      }
      if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
        auto *DeadSI = dyn_cast<StoreInst>(DeadLocWrapper.DefInst);
        auto *KillingSI = dyn_cast<StoreInst>(KillingLocWrapper.DefInst);
        // We are re-using tryToMergePartialOverlappingStores, which requires
        // DeadSI to dominate KillingSI.
        if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
          if (Constant *Merged = tryToMergePartialOverlappingStores(
                  KillingSI, DeadSI, KillingOffset, DeadOffset, DL, BatchAA,
                  &DT)) {
            // Update the stored value of the earlier store to the merged
            // constant.
            DeadSI->setOperand(0, Merged);
            ++NumModifiedStores;
            Changed = true;
            DeletedKillingLoc = true;

            // Remove the killing store and any outstanding overlap intervals
            // for the updated store.
            deleteDeadInstruction(KillingSI, &Deleted);
            auto I = IOLs.find(DeadSI->getParent());
            if (I != IOLs.end())
              I->second.erase(DeadSI);
            break;
          }
        }
      }
      if (OR == OW_Complete) {
        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                          << *DeadLocWrapper.DefInst << "\n  KILLER: "
                          << *KillingLocWrapper.DefInst << '\n');
        deleteDeadInstruction(DeadLocWrapper.DefInst, &Deleted);
        ++NumFastStores;
        Changed = true;
      }
    }
  }

  assert(SkipStores.size() - OrigNumSkipStores == Deleted.size() &&
         "SkipStores and Deleted out of sync?");
  return {Changed, DeletedKillingLoc};
}
bool DSEState::eliminateDeadDefs(const MemoryDefWrapper &KillingDefWrapper) {
  if (KillingDefWrapper.DefinedLocations.empty()) {
    LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
                      << *KillingDefWrapper.DefInst << "\n");
    return false;
  }

  bool MadeChange = false;
  for (auto &KillingLocWrapper : KillingDefWrapper.DefinedLocations) {
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
                      << *KillingLocWrapper.MemDef << " ("
                      << *KillingLocWrapper.DefInst << ")\n");
    auto [Changed, DeletedKillingLoc] = eliminateDeadDefs(KillingLocWrapper);
    MadeChange |= Changed;

    // Check if the store is a no-op.
    if (!DeletedKillingLoc && storeIsNoop(KillingLocWrapper.MemDef,
                                          KillingLocWrapper.UnderlyingObject)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: "
                        << *KillingLocWrapper.DefInst << '\n');
      deleteDeadInstruction(KillingLocWrapper.DefInst);
      NumRedundantStores++;
      MadeChange = true;
      continue;
    }
    // Can we form a calloc from a memset/malloc pair?
    if (!DeletedKillingLoc &&
        tryFoldIntoCalloc(KillingLocWrapper.MemDef,
                          KillingLocWrapper.UnderlyingObject)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n"
                        << "  DEAD: " << *KillingLocWrapper.DefInst << '\n');
      deleteDeadInstruction(KillingLocWrapper.DefInst);
      MadeChange = true;
      continue;
    }
  }
  return MadeChange;
}
static bool eliminateDeadStores(Function &F, AliasAnalysis &AA,
                                MemorySSA &MSSA, DominatorTree &DT,
                                PostDominatorTree &PDT,
                                const TargetLibraryInfo &TLI,
                                const LoopInfo &LI) {
  bool MadeChange = false;
  DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
  // For each store:
  for (unsigned I = 0; I < State.MemDefs.size(); I++) {
    MemoryDef *KillingDef = State.MemDefs[I];
    if (State.SkipStores.count(KillingDef))
      continue;

    MemoryDefWrapper KillingDefWrapper(
        KillingDef, State.getLocForInst(KillingDef->getMemoryInst(),
                                        EnableInitializesImprovement));
    MadeChange |= State.eliminateDeadDefs(KillingDefWrapper);
  }

  if (EnablePartialOverwriteTracking)
    for (auto &KV : State.IOLs)
      MadeChange |= State.removePartiallyOverlappedStores(KV.second);

  MadeChange |= State.eliminateRedundantStoresOfExistingValues();
  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();

  while (!State.ToRemove.empty()) {
    Instruction *DeadInst = State.ToRemove.pop_back_val();
    DeadInst->eraseFromParent();
  }

  return MadeChange;
}
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  // ... (fetch AA, MSSA, DT, PDT, TLI and LI results from AM)
  bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
  if (AreStatisticsEnabled())
    for (auto &I : instructions(F))
      NumRemainingStores += isa<StoreInst>(&I);
#endif

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}
  // Legacy pass manager entry point.
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    const TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    PostDominatorTree &PDT =
        getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
    LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

    bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
    if (AreStatisticsEnabled())
      for (auto &I : instructions(F))
        NumRemainingStores += isa<StoreInst>(&I);
#endif

    return Changed;
  }
char DSELegacyPass::ID = 0;
// ... (INITIALIZE_PASS_BEGIN / INITIALIZE_PASS_DEPENDENCY /
//      INITIALIZE_PASS_END boilerplate)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}