86using namespace PatternMatch;
88#define DEBUG_TYPE "gvn"
// Pass-wide statistics counters (reported under -stats). Each STATISTIC
// expands to a named counter tracking one GVN transformation.
STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
99 "Number of loads moved to predecessor of a critical edge in PRE");
// High-water mark (updated via updateMax, not accumulated) of how many blocks
// a single IsValueFullyAvailableInBlock() query speculated as available.
STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
105 "Number of times we we reached gvn-max-block-speculations cut-off "
106 "preventing further exploration");
121 cl::desc(
"Max number of dependences to attempt Load PRE (default = 100)"));
126 cl::desc(
"Max number of blocks we're willing to speculate on (and recurse "
127 "into) when deducing if a value is fully available or not in GVN "
132 cl::desc(
"Max number of visited instructions when trying to find "
133 "dominating value of select dependency (default = 100)"));
137 cl::desc(
"Max number of instructions to scan in each basic block in GVN "
274 return cast<LoadInst>(
Val);
279 return cast<MemIntrinsic>(
Val);
284 return cast<SelectInst>(
Val);
305 Res.
AV = std::move(
AV);
336 e.type =
I->getType();
337 e.opcode =
I->getOpcode();
342 e.varargs.push_back(
lookupOrAdd(GCR->getOperand(0)));
343 e.varargs.push_back(
lookupOrAdd(GCR->getBasePtr()));
344 e.varargs.push_back(
lookupOrAdd(GCR->getDerivedPtr()));
346 for (
Use &
Op :
I->operands())
349 if (
I->isCommutative()) {
354 assert(
I->getNumOperands() >= 2 &&
"Unsupported commutative instruction!");
355 if (
e.varargs[0] >
e.varargs[1])
357 e.commutative =
true;
360 if (
auto *
C = dyn_cast<CmpInst>(
I)) {
363 if (
e.varargs[0] >
e.varargs[1]) {
368 e.commutative =
true;
369 }
else if (
auto *E = dyn_cast<InsertValueInst>(
I)) {
370 e.varargs.append(E->idx_begin(), E->idx_end());
371 }
else if (
auto *SVI = dyn_cast<ShuffleVectorInst>(
I)) {
373 e.varargs.append(ShuffleMask.
begin(), ShuffleMask.
end());
374 }
else if (
auto *CB = dyn_cast<CallBase>(
I)) {
375 e.attrs = CB->getAttributes();
383 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
384 "Not a comparison!");
387 e.varargs.push_back(lookupOrAdd(LHS));
388 e.varargs.push_back(lookupOrAdd(RHS));
391 if (
e.varargs[0] >
e.varargs[1]) {
396 e.commutative =
true;
402 assert(EI &&
"Not an ExtractValueInst?");
413 e.varargs.push_back(lookupOrAdd(WO->
getLHS()));
414 e.varargs.push_back(lookupOrAdd(WO->
getRHS()));
422 e.varargs.push_back(lookupOrAdd(
Op));
431 Type *PtrTy =
GEP->getType()->getScalarType();
433 unsigned BitWidth =
DL.getIndexTypeSizeInBits(PtrTy);
436 if (
GEP->collectOffset(
DL,
BitWidth, VariableOffsets, ConstantOffset)) {
440 E.opcode =
GEP->getOpcode();
442 E.varargs.push_back(lookupOrAdd(
GEP->getPointerOperand()));
443 for (
const auto &Pair : VariableOffsets) {
444 E.varargs.push_back(lookupOrAdd(Pair.first));
445 E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
447 if (!ConstantOffset.isZero())
449 lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
453 E.opcode =
GEP->getOpcode();
454 E.type =
GEP->getSourceElementType();
456 E.varargs.push_back(lookupOrAdd(
Op));
// Out-of-line defaulted special members for ValueTable (rule of five:
// copy ctor, move ctor, and dtor are all provided alongside the default
// ctor). NOTE(review): presumably defaulted here rather than in the header
// so member types only forward-declared there are complete at this point --
// confirm against the class declaration, which is outside this view.
GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
474 valueNumbering.
insert(std::make_pair(V, num));
475 if (
PHINode *PN = dyn_cast<PHINode>(V))
476 NumberingPhi[num] = PN;
487 if (
C->getFunction()->isPresplitCoroutine()) {
488 valueNumbering[
C] = nextValueNumber;
489 return nextValueNumber++;
495 if (
C->isConvergent()) {
496 valueNumbering[
C] = nextValueNumber;
497 return nextValueNumber++;
500 if (AA->doesNotAccessMemory(
C)) {
502 uint32_t e = assignExpNewValueNum(exp).first;
503 valueNumbering[
C] = e;
507 if (MD && AA->onlyReadsMemory(
C)) {
509 auto ValNum = assignExpNewValueNum(exp);
511 valueNumbering[
C] = ValNum.first;
518 valueNumbering[
C] = nextValueNumber;
519 return nextValueNumber++;
522 if (local_dep.
isDef()) {
527 if (!local_cdep || local_cdep->
arg_size() !=
C->arg_size()) {
528 valueNumbering[
C] = nextValueNumber;
529 return nextValueNumber++;
532 for (
unsigned i = 0, e =
C->arg_size(); i < e; ++i) {
533 uint32_t c_vn = lookupOrAdd(
C->getArgOperand(i));
536 valueNumbering[
C] = nextValueNumber;
537 return nextValueNumber++;
542 valueNumbering[
C] =
v;
555 if (
I.getResult().isNonLocal())
560 if (!
I.getResult().isDef() || cdep !=
nullptr) {
565 CallInst *NonLocalDepCall = dyn_cast<CallInst>(
I.getResult().getInst());
568 cdep = NonLocalDepCall;
577 valueNumbering[
C] = nextValueNumber;
578 return nextValueNumber++;
582 valueNumbering[
C] = nextValueNumber;
583 return nextValueNumber++;
585 for (
unsigned i = 0, e =
C->arg_size(); i < e; ++i) {
586 uint32_t c_vn = lookupOrAdd(
C->getArgOperand(i));
589 valueNumbering[
C] = nextValueNumber;
590 return nextValueNumber++;
595 valueNumbering[
C] =
v;
599 valueNumbering[
C] = nextValueNumber;
600 return nextValueNumber++;
604bool GVNPass::ValueTable::exists(
Value *V)
const {
605 return valueNumbering.contains(V);
612 if (VI != valueNumbering.end())
615 auto *
I = dyn_cast<Instruction>(V);
617 valueNumbering[V] = nextValueNumber;
618 return nextValueNumber++;
622 switch (
I->getOpcode()) {
623 case Instruction::Call:
624 return lookupOrAddCall(cast<CallInst>(
I));
625 case Instruction::FNeg:
626 case Instruction::Add:
627 case Instruction::FAdd:
628 case Instruction::Sub:
629 case Instruction::FSub:
630 case Instruction::Mul:
631 case Instruction::FMul:
632 case Instruction::UDiv:
633 case Instruction::SDiv:
634 case Instruction::FDiv:
635 case Instruction::URem:
636 case Instruction::SRem:
637 case Instruction::FRem:
638 case Instruction::Shl:
639 case Instruction::LShr:
640 case Instruction::AShr:
641 case Instruction::And:
642 case Instruction::Or:
643 case Instruction::Xor:
644 case Instruction::ICmp:
645 case Instruction::FCmp:
646 case Instruction::Trunc:
647 case Instruction::ZExt:
648 case Instruction::SExt:
649 case Instruction::FPToUI:
650 case Instruction::FPToSI:
651 case Instruction::UIToFP:
652 case Instruction::SIToFP:
653 case Instruction::FPTrunc:
654 case Instruction::FPExt:
655 case Instruction::PtrToInt:
656 case Instruction::IntToPtr:
657 case Instruction::AddrSpaceCast:
658 case Instruction::BitCast:
659 case Instruction::Select:
660 case Instruction::Freeze:
661 case Instruction::ExtractElement:
662 case Instruction::InsertElement:
663 case Instruction::ShuffleVector:
664 case Instruction::InsertValue:
667 case Instruction::GetElementPtr:
668 exp = createGEPExpr(cast<GetElementPtrInst>(
I));
670 case Instruction::ExtractValue:
671 exp = createExtractvalueExpr(cast<ExtractValueInst>(
I));
673 case Instruction::PHI:
674 valueNumbering[V] = nextValueNumber;
675 NumberingPhi[nextValueNumber] = cast<PHINode>(V);
676 return nextValueNumber++;
678 valueNumbering[V] = nextValueNumber;
679 return nextValueNumber++;
682 uint32_t e = assignExpNewValueNum(exp).first;
683 valueNumbering[V] = e;
692 assert(VI != valueNumbering.end() &&
"Value not numbered?");
695 return (VI != valueNumbering.end()) ? VI->second : 0;
702uint32_t GVNPass::ValueTable::lookupOrAddCmp(
unsigned Opcode,
706 return assignExpNewValueNum(exp).first;
711 valueNumbering.clear();
712 expressionNumbering.clear();
713 NumberingPhi.clear();
714 PhiTranslateTable.clear();
723 uint32_t Num = valueNumbering.lookup(V);
724 valueNumbering.erase(V);
727 NumberingPhi.erase(Num);
732void GVNPass::ValueTable::verifyRemoved(
const Value *V)
const {
733 assert(!valueNumbering.contains(V) &&
734 "Inst still occurs in value numbering map!");
743 LeaderListNode &Curr = NumToLeaders[
N];
744 if (!Curr.Entry.Val) {
750 LeaderListNode *Node = TableAllocator.Allocate<LeaderListNode>();
753 Node->Next = Curr.Next;
761 LeaderListNode *Prev =
nullptr;
762 LeaderListNode *Curr = &NumToLeaders[
N];
764 while (Curr && (Curr->Entry.Val !=
I || Curr->Entry.BB != BB)) {
773 Prev->Next = Curr->Next;
776 Curr->Entry.Val =
nullptr;
777 Curr->Entry.BB =
nullptr;
779 LeaderListNode *Next = Curr->Next;
780 Curr->Entry.Val = Next->Entry.Val;
781 Curr->Entry.BB = Next->Entry.BB;
782 Curr->Next = Next->Next;
787void GVNPass::LeaderMap::verifyRemoved(
const Value *V)
const {
790 for (
const auto &
I : NumToLeaders) {
792 assert(
I.second.Entry.Val != V &&
"Inst still in value numbering scope!");
794 std::none_of(leader_iterator(&
I.second), leader_iterator(
nullptr),
795 [=](
const LeaderTableEntry &E) {
return E.Val ==
V; }) &&
796 "Inst still in value numbering scope!");
844 "On-demand computation of MemSSA implies that MemDep is disabled!");
848 bool Changed = runImpl(
F, AC, DT, TLI, AA, MemDep, LI, &ORE,
849 MSSA ? &MSSA->getMSSA() :
nullptr);
864 OS, MapClassName2PassName);
867 if (Options.
AllowPRE != std::nullopt)
868 OS << (*Options.
AllowPRE ?
"" :
"no-") <<
"pre;";
873 <<
"split-backedge-load-pre;";
881#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
885 errs() <<
I.first <<
"\n";
916 std::optional<BasicBlock *> UnavailableBB;
920 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
928 while (!Worklist.
empty()) {
932 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator,
bool>
IV =
934 CurrBB, AvailabilityState::SpeculativelyAvailable);
939 if (State == AvailabilityState::Unavailable) {
940 UnavailableBB = CurrBB;
951 ++NumNewNewSpeculativelyAvailableBBs;
957 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
958 State = AvailabilityState::Unavailable;
959 UnavailableBB = CurrBB;
965 NewSpeculativelyAvailableBBs.
insert(CurrBB);
972 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
973 NumNewNewSpeculativelyAvailableBBs);
978 auto MarkAsFixpointAndEnqueueSuccessors =
980 auto It = FullyAvailableBlocks.
find(BB);
981 if (It == FullyAvailableBlocks.
end())
984 case AvailabilityState::Unavailable:
985 case AvailabilityState::Available:
987 case AvailabilityState::SpeculativelyAvailable:
988 State = FixpointState;
991 "Found a speculatively available successor leftover?");
1006 while (!Worklist.
empty())
1007 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1008 AvailabilityState::Unavailable);
1015 while (!Worklist.
empty())
1016 MarkAsFixpointAndEnqueueSuccessors(Worklist.
pop_back_val(),
1017 AvailabilityState::Available);
1020 "Must have fixed all the new speculatively available blocks.");
1023 return !UnavailableBB;
1032 if (V.AV.Val == OldValue)
1033 V.AV.Val = NewValue;
1034 if (V.AV.isSelectValue()) {
1035 if (V.AV.V1 == OldValue)
1037 if (V.AV.V2 == OldValue)
1052 if (ValuesPerBlock.
size() == 1 &&
1054 Load->getParent())) {
1055 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
1056 "Dead BB dominate this block");
1057 return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
1063 SSAUpdate.
Initialize(Load->getType(), Load->getName());
1068 if (AV.AV.isUndefValue())
1078 if (BB == Load->getParent() &&
1079 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
1080 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
1094 Type *LoadTy = Load->getType();
1096 if (isSimpleValue()) {
1097 Res = getSimpleValue();
1098 if (Res->
getType() != LoadTy) {
1102 <<
" " << *getSimpleValue() <<
'\n'
1106 }
else if (isCoercedLoadValue()) {
1107 LoadInst *CoercedLoad = getCoercedLoadValue();
1113 Load->getFunction());
1123 if (!CoercedLoad->
hasMetadata(LLVMContext::MD_noundef))
1125 {LLVMContext::MD_dereferenceable,
1126 LLVMContext::MD_dereferenceable_or_null,
1127 LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
1129 <<
" " << *getCoercedLoadValue() <<
'\n'
1133 }
else if (isMemIntrinValue()) {
1137 <<
" " << *getMemIntrinValue() <<
'\n'
1140 }
else if (isSelectValue()) {
1143 assert(V1 && V2 &&
"both value operands of the select must be present");
1148 cast<SelectInst>(Res)->setDebugLoc(Load->getDebugLoc());
1152 assert(Res &&
"failed to materialize?");
1158 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1178 using namespace ore;
1183 R <<
"load of type " << NV(
"Type", Load->getType()) <<
" not eliminated"
1186 for (
auto *U : Load->getPointerOperand()->users()) {
1187 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1188 auto *
I = cast<Instruction>(U);
1189 if (
I->getFunction() == Load->getFunction() && DT->
dominates(
I, Load)) {
1205 for (
auto *U : Load->getPointerOperand()->users()) {
1206 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1207 auto *
I = cast<Instruction>(U);
1208 if (
I->getFunction() == Load->getFunction() &&
1216 OtherAccess =
nullptr;
1228 R <<
" in favor of " << NV(
"OtherAccess", OtherAccess);
1230 R <<
" because it is clobbered by " << NV(
"ClobberedBy", DepInfo.
getInst());
1250 if (
auto *LI = dyn_cast<LoadInst>(Inst))
1251 if (LI->getPointerOperand() == Loc.
Ptr && LI->getType() == LoadTy)
1257std::optional<AvailableValue>
1260 assert(
Load->isUnordered() &&
"rules below are incorrect for ordered access");
1270 if (
StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1272 if (
Address &&
Load->isAtomic() <= DepSI->isAtomic()) {
1284 if (
LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1288 if (DepLoad != Load &&
Address &&
1289 Load->isAtomic() <= DepLoad->isAtomic()) {
1296 DepLoad->getFunction())) {
1299 Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1313 if (
MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1326 dbgs() <<
" is clobbered by " << *DepInst <<
'\n';);
1330 return std::nullopt;
1343 if (
StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1349 return std::nullopt;
1352 if (S->isAtomic() <
Load->isAtomic())
1353 return std::nullopt;
1358 if (
LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1364 return std::nullopt;
1367 if (
LD->isAtomic() <
Load->isAtomic())
1368 return std::nullopt;
1376 if (
auto *Sel = dyn_cast<SelectInst>(DepInst)) {
1377 assert(Sel->getType() ==
Load->getPointerOperandType());
1383 return std::nullopt;
1388 return std::nullopt;
1396 dbgs() <<
" has unknown def " << *DepInst <<
'\n';);
1397 return std::nullopt;
1400void GVNPass::AnalyzeLoadAvailability(
LoadInst *Load, LoadDepVect &Deps,
1401 AvailValInBlkVect &ValuesPerBlock,
1402 UnavailBlkVect &UnavailableBlocks) {
1407 for (
const auto &Dep : Deps) {
1411 if (DeadBlocks.count(DepBB)) {
1419 UnavailableBlocks.push_back(DepBB);
1426 if (
auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1430 ValuesPerBlock.push_back(
1433 UnavailableBlocks.push_back(DepBB);
1437 assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1438 "post condition violation");
1464 if (
Term->getNumSuccessors() != 2 ||
Term->isSpecialTerminator())
1466 auto *SuccBB =
Term->getSuccessor(0);
1467 if (SuccBB == LoadBB)
1468 SuccBB =
Term->getSuccessor(1);
1469 if (!SuccBB->getSinglePredecessor())
1474 if (Inst.isDebugOrPseudoInst())
1476 if (--NumInsts == 0)
1479 if (!Inst.isIdenticalTo(Load))
1488 return cast<LoadInst>(&Inst);
1498void GVNPass::eliminatePartiallyRedundantLoad(
1499 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1502 for (
const auto &AvailableLoad : AvailableLoads) {
1503 BasicBlock *UnavailableBlock = AvailableLoad.first;
1504 Value *LoadPtr = AvailableLoad.second;
1507 Load->getType(), LoadPtr,
Load->getName() +
".pre",
Load->isVolatile(),
1508 Load->getAlign(),
Load->getOrdering(),
Load->getSyncScopeID(),
1510 NewLoad->setDebugLoc(
Load->getDebugLoc());
1514 if (
auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1517 MSSAU->
insertUse(cast<MemoryUse>(NewAccess),
true);
1523 NewLoad->setAAMetadata(Tags);
1525 if (
auto *MD =
Load->getMetadata(LLVMContext::MD_invariant_load))
1526 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1527 if (
auto *InvGroupMD =
Load->getMetadata(LLVMContext::MD_invariant_group))
1528 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1529 if (
auto *RangeMD =
Load->getMetadata(LLVMContext::MD_range))
1530 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1531 if (
auto *AccessMD =
Load->getMetadata(LLVMContext::MD_access_group))
1533 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1542 ValuesPerBlock.push_back(
1549 if (CriticalEdgePredAndLoad) {
1550 auto I = CriticalEdgePredAndLoad->
find(UnavailableBlock);
1551 if (
I != CriticalEdgePredAndLoad->
end()) {
1552 ++NumPRELoadMoved2CEPred;
1559 LeaderTable.erase(ValNo, OldLoad, OldLoad->
getParent());
1561 removeInstruction(OldLoad);
1570 Load->replaceAllUsesWith(V);
1571 if (isa<PHINode>(V))
1574 I->setDebugLoc(
Load->getDebugLoc());
1575 if (
V->getType()->isPtrOrPtrVectorTy())
1580 <<
"load eliminated by PRE";
1584bool GVNPass::PerformLoadPRE(
LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1585 UnavailBlkVect &UnavailableBlocks) {
1595 UnavailableBlocks.end());
1617 bool MustEnsureSafetyOfSpeculativeExecution =
1622 if (TmpBB == LoadBB)
1624 if (Blockers.count(TmpBB))
1636 MustEnsureSafetyOfSpeculativeExecution =
1637 MustEnsureSafetyOfSpeculativeExecution || ICF->
hasICF(TmpBB);
1648 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1649 for (
BasicBlock *UnavailableBB : UnavailableBlocks)
1650 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1663 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1664 << Pred->
getName() <<
"': " << *Load <<
'\n');
1675 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1676 << Pred->
getName() <<
"': " << *Load <<
'\n');
1682 dbgs() <<
"COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1683 << Pred->
getName() <<
"': " << *Load <<
'\n');
1692 <<
"COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1693 << Pred->
getName() <<
"': " << *Load <<
'\n');
1697 if (
LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1698 CriticalEdgePredAndLoad[Pred] = LI;
1703 PredLoads[Pred] =
nullptr;
1708 unsigned NumInsertPreds = PredLoads.
size() + CriticalEdgePredSplit.
size();
1709 unsigned NumUnavailablePreds = NumInsertPreds +
1710 CriticalEdgePredAndLoad.
size();
1711 assert(NumUnavailablePreds != 0 &&
1712 "Fully available value should already be eliminated!");
1713 (void)NumUnavailablePreds;
1719 if (NumInsertPreds > 1)
1724 if (MustEnsureSafetyOfSpeculativeExecution) {
1725 if (CriticalEdgePredSplit.
size())
1729 for (
auto &PL : PredLoads)
1733 for (
auto &CEP : CriticalEdgePredAndLoad)
1740 for (
BasicBlock *OrigPred : CriticalEdgePredSplit) {
1741 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1742 assert(!PredLoads.count(OrigPred) &&
"Split edges shouldn't be in map!");
1743 PredLoads[NewPred] =
nullptr;
1744 LLVM_DEBUG(
dbgs() <<
"Split critical edge " << OrigPred->getName() <<
"->"
1745 << LoadBB->
getName() <<
'\n');
1748 for (
auto &CEP : CriticalEdgePredAndLoad)
1749 PredLoads[CEP.first] =
nullptr;
1752 bool CanDoPRE =
true;
1755 for (
auto &PredLoad : PredLoads) {
1756 BasicBlock *UnavailablePred = PredLoad.first;
1766 Value *LoadPtr =
Load->getPointerOperand();
1768 while (Cur != LoadBB) {
1781 LoadPtr =
Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1788 << *
Load->getPointerOperand() <<
"\n");
1793 PredLoad.second = LoadPtr;
1797 while (!NewInsts.
empty()) {
1807 return !CriticalEdgePredSplit.empty();
1813 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOAD: " << *Load <<
'\n');
1815 <<
" INSTS: " << *NewInsts.
back()
1823 I->updateLocationAfterHoist();
1832 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1833 &CriticalEdgePredAndLoad);
1838bool GVNPass::performLoopLoadPRE(
LoadInst *Load,
1839 AvailValInBlkVect &ValuesPerBlock,
1840 UnavailBlkVect &UnavailableBlocks) {
1843 if (!L ||
L->getHeader() !=
Load->getParent())
1848 if (!Preheader || !Latch)
1851 Value *LoadPtr =
Load->getPointerOperand();
1853 if (!
L->isLoopInvariant(LoadPtr))
1863 for (
auto *Blocker : UnavailableBlocks) {
1865 if (!
L->contains(Blocker))
1889 if (Blocker->getTerminator()->mayWriteToMemory())
1892 LoopBlock = Blocker;
1905 AvailableLoads[LoopBlock] = LoadPtr;
1906 AvailableLoads[Preheader] = LoadPtr;
1908 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING PRE LOOP LOAD: " << *Load <<
'\n');
1909 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1917 using namespace ore;
1921 <<
"load of type " << NV(
"Type", Load->getType()) <<
" eliminated"
1922 << setExtraArgs() <<
" in favor of "
1929bool GVNPass::processNonLocalLoad(
LoadInst *Load) {
1931 if (
Load->getParent()->getParent()->hasFnAttribute(
1932 Attribute::SanitizeAddress) ||
1933 Load->getParent()->getParent()->hasFnAttribute(
1934 Attribute::SanitizeHWAddress))
1944 unsigned NumDeps = Deps.size();
1951 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1953 dbgs() <<
" has unknown dependencies\n";);
1957 bool Changed =
false;
1960 dyn_cast<GetElementPtrInst>(
Load->getOperand(0))) {
1961 for (
Use &U :
GEP->indices())
1963 Changed |= performScalarPRE(
I);
1967 AvailValInBlkVect ValuesPerBlock;
1968 UnavailBlkVect UnavailableBlocks;
1969 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1973 if (ValuesPerBlock.empty())
1981 if (UnavailableBlocks.empty()) {
1982 LLVM_DEBUG(
dbgs() <<
"GVN REMOVING NONLOCAL LOAD: " << *Load <<
'\n');
1988 Load->replaceAllUsesWith(V);
1990 if (isa<PHINode>(V))
1996 if (
Load->getDebugLoc() &&
Load->getParent() ==
I->getParent())
1997 I->setDebugLoc(
Load->getDebugLoc());
1998 if (
V->getType()->isPtrOrPtrVectorTy())
2012 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
2013 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
2021 auto *I = dyn_cast<Instruction>(U);
2022 return I && I->getParent() == BB;
2026bool GVNPass::processAssumeIntrinsic(
AssumeInst *IntrinsicI) {
2030 if (
Cond->isZero()) {
2049 for (
const auto &Acc : *AL) {
2050 if (
auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
2051 if (!Current->getMemoryInst()->comesBefore(NewS)) {
2052 FirstNonDom = Current;
2066 MSSAU->
insertDef(cast<MemoryDef>(NewDef),
false);
2076 if (isa<Constant>(V)) {
2084 bool Changed =
false;
2091 Changed |= propagateEquality(V, True, Edge,
false);
2097 ReplaceOperandsWithMap[
V] = True;
2119 if (
auto *CmpI = dyn_cast<CmpInst>(V)) {
2120 if (CmpI->isEquivalence()) {
2121 Value *CmpLHS = CmpI->getOperand(0);
2122 Value *CmpRHS = CmpI->getOperand(1);
2128 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
2130 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
2132 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
2133 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
2144 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
2148 << *CmpLHS <<
" with "
2149 << *CmpRHS <<
" in block "
2150 << IntrinsicI->
getParent()->getName() <<
"\n");
2155 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
2168 I->replaceAllUsesWith(Repl);
2173bool GVNPass::processLoad(
LoadInst *L) {
2178 if (!
L->isUnordered())
2181 if (
L->use_empty()) {
2191 return processNonLocalLoad(L);
2198 dbgs() <<
"GVN: load ";
L->printAsOperand(
dbgs());
2199 dbgs() <<
" has unknown dependence\n";);
2203 auto AV = AnalyzeLoadAvailability(L, Dep,
L->getPointerOperand());
2226std::pair<uint32_t, bool>
2227GVNPass::ValueTable::assignExpNewValueNum(
Expression &Exp) {
2229 bool CreateNewValNum = !
e;
2230 if (CreateNewValNum) {
2231 Expressions.push_back(Exp);
2232 if (ExprIdx.size() < nextValueNumber + 1)
2233 ExprIdx.resize(nextValueNumber * 2);
2234 e = nextValueNumber;
2235 ExprIdx[nextValueNumber++] = nextExprNumber++;
2237 return {
e, CreateNewValNum};
2245 Gvn.LeaderTable.getLeaders(Num),
2246 [=](
const LeaderMap::LeaderTableEntry &L) { return L.BB == BB; });
2253 auto FindRes = PhiTranslateTable.find({Num, Pred});
2254 if (FindRes != PhiTranslateTable.end())
2255 return FindRes->second;
2256 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2257 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2268 auto Leaders = Gvn.LeaderTable.getLeaders(Num);
2269 for (
const auto &Entry : Leaders) {
2270 Call = dyn_cast<CallInst>(Entry.Val);
2271 if (Call && Call->getParent() == PhiBlock)
2275 if (AA->doesNotAccessMemory(Call))
2278 if (!MD || !AA->onlyReadsMemory(Call))
2290 if (
D.getResult().isNonFuncLocal())
2301 if (
PHINode *PN = NumberingPhi[Num]) {
2302 for (
unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2303 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2313 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2316 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2320 for (
unsigned i = 0; i <
Exp.varargs.size(); i++) {
2324 if ((i > 1 &&
Exp.opcode == Instruction::InsertValue) ||
2325 (i > 0 &&
Exp.opcode == Instruction::ExtractValue) ||
2326 (i > 1 &&
Exp.opcode == Instruction::ShuffleVector))
2328 Exp.varargs[i] = phiTranslate(Pred, PhiBlock,
Exp.varargs[i], Gvn);
2331 if (
Exp.commutative) {
2332 assert(
Exp.varargs.size() >= 2 &&
"Unsupported commutative instruction!");
2333 if (
Exp.varargs[0] >
Exp.varargs[1]) {
2336 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2337 Exp.opcode = (Opcode << 8) |
2343 if (
uint32_t NewNum = expressionNumbering[Exp]) {
2344 if (
Exp.opcode == Instruction::Call && NewNum != Num)
2345 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2353void GVNPass::ValueTable::eraseTranslateCacheEntry(
2356 PhiTranslateTable.erase({Num, Pred});
2365 auto Leaders = LeaderTable.getLeaders(num);
2366 if (Leaders.empty())
2369 Value *Val =
nullptr;
2370 for (
const auto &Entry : Leaders) {
2373 if (isa<Constant>(Val))
2393 "No edge between these basic blocks!");
2394 return Pred !=
nullptr;
2397void GVNPass::assignBlockRPONumber(
Function &
F) {
2398 BlockRPONumber.clear();
2402 BlockRPONumber[BB] = NextBlockNumber++;
2403 InvalidBlockRPONumbers =
false;
2406bool GVNPass::replaceOperandsForInBlockEquality(
Instruction *Instr)
const {
2407 bool Changed =
false;
2408 for (
unsigned OpNum = 0; OpNum <
Instr->getNumOperands(); ++OpNum) {
2410 auto it = ReplaceOperandsWithMap.find(Operand);
2411 if (it != ReplaceOperandsWithMap.end()) {
2413 << *it->second <<
" in instruction " << *Instr <<
'\n');
2414 Instr->setOperand(OpNum, it->second);
2426bool GVNPass::propagateEquality(
Value *LHS,
Value *RHS,
2428 bool DominatesByEdge) {
2430 Worklist.
push_back(std::make_pair(LHS, RHS));
2431 bool Changed =
false;
2436 while (!Worklist.
empty()) {
2437 std::pair<Value*, Value*> Item = Worklist.
pop_back_val();
2438 LHS = Item.first;
RHS = Item.second;
2445 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2449 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2451 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
"Unexpected value!");
2455 : cast<Instruction>(LHS)->getDataLayout();
2462 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2463 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2482 if (RootDominatesEnd && !isa<Instruction>(RHS) &&
2484 LeaderTable.insert(LVN, RHS, Root.
getEnd());
2491 auto canReplacePointersCallBack = [&
DL](
const Use &
U,
const Value *To) {
2494 unsigned NumReplacements =
2497 canReplacePointersCallBack)
2499 canReplacePointersCallBack);
2501 if (NumReplacements > 0) {
2503 NumGVNEqProp += NumReplacements;
2523 bool isKnownFalse = !isKnownTrue;
2538 if (
CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2539 Value *Op0 =
Cmp->getOperand(0), *Op1 =
Cmp->getOperand(1);
2544 if (
Cmp->isEquivalence(isKnownFalse))
2545 Worklist.
push_back(std::make_pair(Op0, Op1));
2549 Constant *NotVal = ConstantInt::get(
Cmp->getType(), isKnownFalse);
2557 if (Num < NextNum) {
2559 if (NotCmp && isa<Instruction>(NotCmp)) {
2560 unsigned NumReplacements =
2565 Changed |= NumReplacements > 0;
2566 NumGVNEqProp += NumReplacements;
2576 if (RootDominatesEnd)
2577 LeaderTable.insert(Num, NotVal, Root.
getEnd());
2590 if (isa<DbgInfoIntrinsic>(
I))
2599 bool Changed =
false;
2600 if (!
I->use_empty()) {
2604 I->replaceAllUsesWith(V);
2612 if (MD &&
V->getType()->isPtrOrPtrVectorTy())
2619 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
2620 return processAssumeIntrinsic(Assume);
2622 if (
LoadInst *Load = dyn_cast<LoadInst>(
I)) {
2623 if (processLoad(Load))
2627 LeaderTable.insert(Num, Load,
Load->getParent());
2634 if (!BI->isConditional())
2637 if (isa<Constant>(BI->getCondition()))
2638 return processFoldableCondBr(BI);
2640 Value *BranchCond = BI->getCondition();
2644 if (TrueSucc == FalseSucc)
2648 bool Changed =
false;
2652 Changed |= propagateEquality(BranchCond, TrueVal, TrueE,
true);
2656 Changed |= propagateEquality(BranchCond, FalseVal, FalseE,
true);
2663 Value *SwitchCond =
SI->getCondition();
2665 bool Changed =
false;
2670 ++SwitchEdges[Succ];
2676 if (SwitchEdges.
lookup(Dst) == 1) {
2678 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E,
true);
2686 if (
I->getType()->isVoidTy())
2694 if (isa<AllocaInst>(
I) ||
I->isTerminator() || isa<PHINode>(
I)) {
2695 LeaderTable.insert(Num,
I,
I->getParent());
2702 if (Num >= NextNum) {
2703 LeaderTable.insert(Num,
I,
I->getParent());
2709 Value *Repl = findLeader(
I->getParent(), Num);
2712 LeaderTable.insert(Num,
I,
I->getParent());
2746 InvalidBlockRPONumbers =
true;
2748 MSSAU = MSSA ? &Updater :
nullptr;
2750 bool Changed =
false;
2751 bool ShouldContinue =
true;
2761 Changed |= removedBlock;
2765 unsigned Iteration = 0;
2766 while (ShouldContinue) {
2769 ShouldContinue = iterateOnFunction(
F);
2770 Changed |= ShouldContinue;
2777 assignValNumForDeadCode();
2778 bool PREChanged =
true;
2779 while (PREChanged) {
2780 PREChanged = performPRE(
F);
2781 Changed |= PREChanged;
2790 cleanupGlobalSets();
2805 "We expect InstrsToErase to be empty across iterations");
2806 if (DeadBlocks.count(BB))
2810 ReplaceOperandsWithMap.clear();
2811 bool ChangedFunction =
false;
2819 for (
PHINode *PN : PHINodesToRemove) {
2821 removeInstruction(PN);
2826 if (!ReplaceOperandsWithMap.empty())
2827 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2828 ChangedFunction |= processInstruction(&*BI);
2830 if (InstrsToErase.
empty()) {
2836 NumGVNInstr += InstrsToErase.
size();
2839 bool AtStart = BI == BB->
begin();
2843 for (
auto *
I : InstrsToErase) {
2844 assert(
I->getParent() == BB &&
"Removing instruction from wrong block?");
2848 removeInstruction(
I);
2850 InstrsToErase.clear();
2858 return ChangedFunction;
2869 for (
unsigned i = 0, e =
Instr->getNumOperands(); i != e; ++i) {
2871 if (isa<Argument>(
Op) || isa<Constant>(
Op) || isa<GlobalValue>(
Op))
2883 if (
Value *V = findLeader(Pred, TValNo)) {
2884 Instr->setOperand(i, V);
2907 LeaderTable.insert(Num, Instr, Pred);
2911bool GVNPass::performScalarPRE(
Instruction *CurInst) {
2912 if (isa<AllocaInst>(CurInst) || CurInst->
isTerminator() ||
2915 isa<DbgInfoIntrinsic>(CurInst))
2922 if (isa<CmpInst>(CurInst))
2932 if (isa<GetElementPtrInst>(CurInst))
2935 if (
auto *CallB = dyn_cast<CallBase>(CurInst)) {
2937 if (CallB->isInlineAsm())
2949 unsigned NumWith = 0;
2950 unsigned NumWithout = 0;
2955 if (InvalidBlockRPONumbers)
2956 assignBlockRPONumber(*CurrentBlock->
getParent());
2967 assert(BlockRPONumber.count(
P) && BlockRPONumber.count(CurrentBlock) &&
2968 "Invalid BlockRPONumber map.");
2969 if (BlockRPONumber[
P] >= BlockRPONumber[CurrentBlock]) {
2975 Value *predV = findLeader(
P, TValNo);
2980 }
else if (predV == CurInst) {
2992 if (NumWithout > 1 || NumWith == 0)
3000 if (NumWithout != 0) {
3019 toSplit.push_back(std::make_pair(PREPred->
getTerminator(), SuccNum));
3023 PREInstr = CurInst->
clone();
3024 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
3027 verifyRemoved(PREInstr);
3036 assert(PREInstr !=
nullptr || NumWithout == 0);
3042 CurInst->
getName() +
".pre-phi");
3043 Phi->insertBefore(CurrentBlock->begin());
3044 for (
unsigned i = 0, e = predMap.
size(); i != e; ++i) {
3045 if (
Value *V = predMap[i].first) {
3049 Phi->addIncoming(V, predMap[i].second);
3051 Phi->addIncoming(PREInstr, PREPred);
3058 LeaderTable.insert(ValNo, Phi, CurrentBlock);
3061 if (MD &&
Phi->getType()->isPtrOrPtrVectorTy())
3064 LeaderTable.erase(ValNo, CurInst, CurrentBlock);
3067 removeInstruction(CurInst);
3076 bool Changed =
false;
3079 if (CurrentBlock == &
F.getEntryBlock())
3083 if (CurrentBlock->isEHPad())
3087 BE = CurrentBlock->end();
3090 Changed |= performScalarPRE(CurInst);
3094 if (splitCriticalEdges())
3111 InvalidBlockRPONumbers =
true;
3118bool GVNPass::splitCriticalEdges() {
3119 if (toSplit.empty())
3122 bool Changed =
false;
3124 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
3128 }
while (!toSplit.empty());
3132 InvalidBlockRPONumbers =
true;
3138bool GVNPass::iterateOnFunction(
Function &
F) {
3139 cleanupGlobalSets();
3142 bool Changed =
false;
3149 Changed |= processBlock(BB);
3154void GVNPass::cleanupGlobalSets() {
3156 LeaderTable.clear();
3157 BlockRPONumber.clear();
3159 InvalidBlockRPONumbers =
true;
3170 I->eraseFromParent();
3175void GVNPass::verifyRemoved(
const Instruction *Inst)
const {
3177 LeaderTable.verifyRemoved(Inst);
3189 while (!NewDead.
empty()) {
3191 if (DeadBlocks.count(
D))
3197 DeadBlocks.insert(Dom.
begin(), Dom.
end());
3202 if (DeadBlocks.count(S))
3205 bool AllPredDead =
true;
3207 if (!DeadBlocks.count(
P)) {
3208 AllPredDead =
false;
3229 if (DeadBlocks.count(
B))
3236 if (!DeadBlocks.count(
P))
3242 DeadBlocks.insert(
P = S);
3248 if (!DeadBlocks.count(
P))
3272bool GVNPass::processFoldableCondBr(
BranchInst *BI) {
3286 if (DeadBlocks.count(DeadRoot))
3290 DeadRoot = splitCriticalEdges(BI->
getParent(), DeadRoot);
3292 addDeadBlock(DeadRoot);
3300void GVNPass::assignValNumForDeadCode() {
3304 LeaderTable.insert(ValNum, &Inst, BB);
3316 .setMemDep(MemDepAnalysis)
3317 .setMemorySSA(MemSSAAnalysis)) {
3322 if (skipFunction(
F))
3325 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3326 if (Impl.isMemorySSAEnabled() && !MSSAWP)
3327 MSSAWP = &getAnalysis<MemorySSAWrapperPass>();
3329 return Impl.runImpl(
3330 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F),
3331 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3332 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F),
3333 getAnalysis<AAResultsWrapperPass>().getAAResults(),
3334 Impl.isMemDepEnabled()
3335 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3337 getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
3338 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3339 MSSAWP ? &MSSAWP->getMSSA() :
nullptr);
3347 if (Impl.isMemDepEnabled())
3356 if (Impl.isMemorySSAEnabled())
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool hasUsersIn(Value *V, BasicBlock *BB)
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< uint32_t > MaxNumInsnsPerBlock("gvn-max-num-insns", cl::Hidden, cl::init(100), cl::desc("Max number of instructions to scan in each basic block in GVN " "(default = 100)"))
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static cl::opt< bool > GVNEnableMemorySSA("enable-gvn-memoryssa", cl::init(false))
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static void replaceValuesPerBlockEntry(SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, Value *OldValue, Value *NewValue)
If the specified OldValue exists in ValuesPerBlock, replace its value with NewValue.
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A lookup helper function.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
ppc ctr loops PowerPC CTR Loops Verify
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
This represents the llvm.assume intrinsic.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
bool isEmpty() const
Return true if there are no attributes.
std::optional< AttributeList > intersectWith(LLVMContext &C, AttributeList Other) const
Try to intersect this AttributeList with Other.
const BasicBlock * getEnd() const
const BasicBlock * getStart() const
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Analysis pass which computes a DominatorTree.
void getDescendants(NodeT *R, SmallVectorImpl< NodeT * > &Result) const
Get all nodes dominated by R, including R itself.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one be...
uint32_t getNextUnusedValueNumber()
uint32_t lookupOrAdd(Value *V)
lookup_or_add - Returns the value number for the specified value, assigning it a new number if it did...
uint32_t lookup(Value *V, bool Verify=true) const
Returns the value number of the specified value.
void setAliasAnalysis(AAResults *A)
void clear()
Remove all entries from the ValueTable.
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock, uint32_t Num, GVNPass &Gvn)
Wrap phiTranslateImpl to provide caching functionality.
void setMemDep(MemoryDependenceResults *M)
void erase(Value *v)
Remove a value from the value numbering.
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
void setDomTree(DominatorTree *D)
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock)
Erase stale entry from phiTranslate cache so phiTranslate can be computed again.
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
The core GVN pass object.
friend class gvn::GVNLegacyPass
bool isPREEnabled() const
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
AAResults * getAliasAnalysis() const
bool isLoadPREEnabled() const
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
bool isMemorySSAEnabled() const
DominatorTree & getDominatorTree() const
bool isLoadInLoopPREEnabled() const
bool isLoadPRESplitBackedgeEnabled() const
void markInstructionForDeletion(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
bool isMemDepEnabled() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Legacy wrapper pass to provide the GlobalsAAResult object.
This class allows to keep track on instructions with implicit control flow.
bool isDominatedByICFIFromSameBlock(const Instruction *Insn)
Returns true if the first ICFI of Insn's block exists and dominates Insn.
bool hasICF(const BasicBlock *BB)
Returns true if at least one instruction from the given basic block has implicit control flow.
void clear()
Invalidates all information from this tracking.
void removeUsersOf(const Instruction *Inst)
Notifies this tracking that we are going to replace all uses of Inst.
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Notifies this tracking that we are going to insert a new instruction Inst to the basic block BB.
void removeInstruction(const Instruction *Inst)
Notifies this tracking that we are going to remove the instruction Inst It makes all necessary update...
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Instruction * getPrevNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the previous non-debug instruction in the same basic block as 'this',...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs={})
Drop all unknown metadata except for debug locations.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
An analysis that produces MemoryDependenceResults for a function.
Provides a lazy, caching interface for making common memory aliasing information queries,...
std::vector< NonLocalDepEntry > NonLocalDepInfo
void invalidateCachedPredecessors()
Clears the PredIteratorCache info.
void invalidateCachedPointerInfo(Value *Ptr)
Invalidates cached information about the specified pointer, because it may be too conservative in mem...
std::optional< int32_t > getClobberOffset(LoadInst *DepInst) const
Return the clobber offset to dependent instruction.
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
void getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl< NonLocalDepResult > &Result)
Perform a full dependency query for an access to the QueryInst's specified memory location,...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDependenceResults instance.
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
const Value * Ptr
The address of the start of the location.
An analysis that produces MemorySSA for a function.
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
void insertUse(MemoryUse *Use, bool RenameUses=false)
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Legacy analysis pass which computes MemorySSA.
Encapsulates MemorySSA, including all data associated with memory accesses.
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Class that has the common methods + fields of memory uses/defs.
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PHITransAddr - An address value which tracks and handles phi translation.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getCondition() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
SmallVector & operator=(const SmallVector &RHS)
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
void deleteValue()
Delete a pointer to a generic Value.
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
An efficient, type-erasing, non-owning reference to a callable.
GVNLegacyPass(bool MemDepAnalysis=GVNEnableMemDep, bool MemSSAAnalysis=GVNEnableMemorySSA)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, Function *F)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, Function *F)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
auto pred_end(const MachineBasicBlock *BB)
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
void initializeGVNLegacyPassPass(PassRegistry &)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)
bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deeme...
bool isModSet(const ModRefInfo MRI)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
@ Global
Append to llvm.global_dtors.
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
bool VerifyMemorySSA
Enables verification of MemorySSA.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
FunctionPass * createGVNPass()
Create a legacy GVN pass.
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet, returning true if uncertain.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Option class for critical edge splitting.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static unsigned getHashValue(const GVNPass::Expression &e)
static GVNPass::Expression getEmptyKey()
An information struct used to provide DenseMap with the various necessary components for a given value type.
A set of parameters to control various transforms performed by GVN pass.
std::optional< bool > AllowLoadPRESplitBackedge
std::optional< bool > AllowPRE
std::optional< bool > AllowLoadInLoopPRE
std::optional< bool > AllowMemDep
std::optional< bool > AllowLoadPRE
std::optional< bool > AllowMemorySSA
SmallVector< uint32_t, 4 > varargs
bool operator==(const Expression &other) const
friend hash_code hash_value(const Expression &Value)
Expression(uint32_t o=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
A MapVector that performs no allocations if smaller than a certain size.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Value * MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type.
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, GVNPass &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const