32#if LLVM_ENABLE_ABI_BREAKING_CHECKS
33#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
35#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
42 cl::desc(
"When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
57 NUW = OBO->hasNoUnsignedWrap();
58 NSW = OBO->hasNoSignedWrap();
61 Exact = PEO->isExact();
65 NNeg = PNI->hasNonNeg();
67 NUW = TI->hasNoUnsignedWrap();
68 NSW = TI->hasNoSignedWrap();
78 I->setHasNoUnsignedWrap(
NUW);
79 I->setHasNoSignedWrap(
NSW);
88 I->setHasNoUnsignedWrap(
NUW);
89 I->setHasNoSignedWrap(
NSW);
114 Value *Ret =
nullptr;
119 if (U->getType() != Ty)
128 if (IP->getParent() == CI->
getParent() && &*BIP != CI &&
138 SCEVInsertPointGuard Guard(Builder,
this);
139 Builder.SetInsertPoint(&*IP);
140 Ret = Builder.CreateCast(
Op, V, Ty,
V->getName());
157 IP =
II->getNormalDest()->begin();
165 IP = MustDominate->
getParent()->getFirstInsertionPt();
167 assert(!IP->isEHPad() &&
"unexpected eh pad!");
183 while (!WorkList.
empty()) {
192 InsertedValues.erase(
I);
193 InsertedPostIncValues.erase(
I);
195 I->eraseFromParent();
200SCEVExpander::GetOptimalInsertionPointForCastOf(
Value *V)
const {
219 "Expected the cast argument to be a global/constant");
220 return Builder.GetInsertBlock()
223 .getFirstInsertionPt();
231 assert((
Op == Instruction::BitCast ||
232 Op == Instruction::PtrToInt ||
233 Op == Instruction::IntToPtr) &&
234 "InsertNoopCastOfTo cannot perform non-noop casts!");
235 assert(SE.getTypeSizeInBits(
V->getType()) == SE.getTypeSizeInBits(Ty) &&
236 "InsertNoopCastOfTo cannot change sizes!");
243 if (
Op == Instruction::IntToPtr) {
245 if (DL.isNonIntegralPointerType(PtrTy))
249 if (
Op == Instruction::BitCast) {
250 if (
V->getType() == Ty)
258 if ((
Op == Instruction::PtrToInt ||
Op == Instruction::IntToPtr) &&
259 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(
V->getType())) {
261 if ((CI->
getOpcode() == Instruction::PtrToInt ||
262 CI->
getOpcode() == Instruction::IntToPtr) &&
263 SE.getTypeSizeInBits(CI->
getType()) ==
267 if ((
CE->getOpcode() == Instruction::PtrToInt ||
268 CE->getOpcode() == Instruction::IntToPtr) &&
269 SE.getTypeSizeInBits(
CE->getType()) ==
270 SE.getTypeSizeInBits(
CE->getOperand(0)->getType()))
271 return CE->getOperand(0);
279 return ReuseOrCreateCast(V, Ty,
Op, GetOptimalInsertionPointForCastOf(V));
295 unsigned ScanLimit = 6;
299 if (IP != BlockBegin) {
301 for (; ScanLimit; --IP, --ScanLimit) {
316 if (IP->getOpcode() == (
unsigned)Opcode && IP->getOperand(0) ==
LHS &&
317 IP->getOperand(1) ==
RHS && !canGenerateIncompatiblePoison(&*IP))
319 if (IP == BlockBegin)
break;
324 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
325 SCEVInsertPointGuard Guard(Builder,
this);
329 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
330 if (!
L->isLoopInvariant(
LHS) || !
L->isLoopInvariant(
RHS))
break;
332 if (!Preheader)
break;
391 return Builder.CreatePtrAdd(CLHS, CRHS,
"", NW);
394 unsigned ScanLimit = 6;
398 if (IP != BlockBegin) {
400 for (; ScanLimit; --IP, --ScanLimit) {
402 if (
GEP->getPointerOperand() == V &&
403 GEP->getSourceElementType() == Builder.getInt8Ty() &&
404 GEP->getOperand(1) == Idx) {
406 GEP->setNoWrapFlags(
GEP->getNoWrapFlags() & NW);
410 if (IP == BlockBegin)
break;
415 SCEVInsertPointGuard Guard(Builder,
this);
418 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
419 if (!
L->isLoopInvariant(V) || !
L->isLoopInvariant(Idx))
break;
421 if (!Preheader)
break;
428 return Builder.CreatePtrAdd(V, Idx,
"scevgep", NW);
438 if (
A->contains(
B))
return B;
439 if (
B->contains(
A))
return A;
440 if (DT.
dominates(
A->getHeader(),
B->getHeader()))
return B;
441 if (DT.
dominates(
B->getHeader(),
A->getHeader()))
return A;
447const Loop *SCEVExpander::getRelevantLoop(
const SCEV *S) {
449 auto Pair = RelevantLoops.try_emplace(S);
451 return Pair.first->second;
470 const Loop *
L =
nullptr;
475 return RelevantLoops[S] =
L;
480 return Pair.first->second = SE.LI.getLoopFor(
I->getParent());
496 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
498 bool operator()(std::pair<const Loop *, const SCEV *>
LHS,
499 std::pair<const Loop *, const SCEV *>
RHS)
const {
506 if (
LHS.first !=
RHS.first)
512 if (
LHS.second->isNonConstantNegative()) {
513 if (!
RHS.second->isNonConstantNegative())
515 }
else if (
RHS.second->isNonConstantNegative())
527 const SCEV *URemLHS =
nullptr;
528 const SCEV *URemRHS =
nullptr;
529 if (SE.matchURem(S, URemLHS, URemRHS)) {
542 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(
Op),
Op));
550 Value *Sum =
nullptr;
551 for (
auto I = OpsAndLoops.
begin(),
E = OpsAndLoops.
end();
I !=
E;) {
552 const Loop *CurLoop =
I->first;
553 const SCEV *
Op =
I->second;
561 assert(!
Op->getType()->isPointerTy() &&
"Only first op can be pointer");
566 for (;
I !=
E &&
I->first == CurLoop; ++
I) {
569 const SCEV *
X =
I->second;
572 X = SE.getSCEV(
U->getValue());
575 Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum, S->
getNoWrapFlags());
576 }
else if (
Op->isNonConstantNegative()) {
578 Value *
W = expand(SE.getNegativeSCEV(
Op));
604 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(
Op),
Op));
611 Value *Prod =
nullptr;
612 auto I = OpsAndLoops.
begin();
617 const auto ExpandOpBinPowN = [
this, &
I, &OpsAndLoops]() {
627 while (
E != OpsAndLoops.
end() && *
I == *
E &&
Exponent != MaxExponent) {
631 assert(
Exponent > 0 &&
"Trying to calculate a zeroth exponent of operand?");
639 for (uint64_t BinExp = 2; BinExp <=
Exponent; BinExp <<= 1) {
650 assert(Result &&
"Nothing was expanded?");
654 while (
I != OpsAndLoops.
end()) {
657 Prod = ExpandOpBinPowN();
658 }
else if (
I->second->isAllOnesValue()) {
665 Value *
W = ExpandOpBinPowN();
674 if (
RHS->logBase2() ==
RHS->getBitWidth() - 1)
676 Prod = InsertBinop(Instruction::Shl, Prod,
677 ConstantInt::get(Ty,
RHS->logBase2()), NWFlags,
680 Prod = InsertBinop(Instruction::Mul, Prod, W, S->
getNoWrapFlags(),
692 const APInt &
RHS = SC->getAPInt();
693 if (
RHS.isPowerOf2())
694 return InsertBinop(Instruction::LShr,
LHS,
695 ConstantInt::get(SC->getType(),
RHS.logBase2()),
699 const SCEV *RHSExpr = S->
getRHS();
702 bool GuaranteedNotPoison =
703 ScalarEvolution::isGuaranteedNotToBePoison(RHSExpr);
704 if (!GuaranteedNotPoison)
705 RHS = Builder.CreateFreeze(
RHS);
710 if (!SE.isKnownNonZero(RHSExpr) || !GuaranteedNotPoison)
711 RHS = Builder.CreateIntrinsic(
RHS->
getType(), Intrinsic::umax,
712 {RHS, ConstantInt::get(RHS->getType(), 1)});
715 SE.isKnownNonZero(S->
getRHS()));
728 if (L == IVIncInsertLoop) {
731 if (!SE.DT.dominates(OInst, IVIncInsertPos))
745 return isNormalAddRecExprPHI(PN, IncV, L);
760 if (IncV == InsertPos)
767 case Instruction::Add:
768 case Instruction::Sub: {
770 if (!OInst || SE.DT.dominates(OInst, InsertPos))
774 case Instruction::BitCast:
776 case Instruction::GetElementPtr:
781 if (!SE.DT.dominates(OInst, InsertPos))
807 if (Builder.GetInsertPoint() == It)
808 Builder.SetInsertPoint(&*NewInsertPt);
809 for (
auto *InsertPtGuard : InsertPointGuards)
810 if (InsertPtGuard->GetInsertPoint() == It)
811 InsertPtGuard->SetInsertPoint(NewInsertPt);
818 bool RecomputePoisonFlags) {
823 I->dropPoisonGeneratingFlags();
825 if (
auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
834 if (SE.DT.dominates(IncV, InsertPos)) {
835 if (RecomputePoisonFlags)
836 FixupPoisonFlags(IncV);
846 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
858 if (SE.DT.dominates(IncV, InsertPos))
862 fixupInsertPoints(
I);
864 if (RecomputePoisonFlags)
887 (IVOper =
getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
904 IncV = Builder.CreatePtrAdd(PN, StepV,
"scevgep");
907 Builder.CreateSub(PN, StepV, Twine(IVName) +
".iv.next") :
908 Builder.CreateAdd(PN, StepV, Twine(IVName) +
".iv.next");
920 Type *PhiTy = Phi->getType();
934 if (Phi == Requested) {
957 const SCEV *ExtendAfterOp =
959 return ExtendAfterOp == OpAfterExtend;
971 const SCEV *ExtendAfterOp =
973 return ExtendAfterOp == OpAfterExtend;
980SCEVExpander::getAddRecExprPHILiterally(
const SCEVAddRecExpr *Normalized,
983 assert((!IVIncInsertLoop || IVIncInsertPos) &&
984 "Uninitialized insert position");
989 PHINode *AddRecPhiMatch =
nullptr;
996 bool TryNonMatchingSCEV =
998 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1000 for (PHINode &PN :
L->getHeader()->phis()) {
1001 if (!SE.isSCEVable(PN.
getType()))
1008 DebugType,
dbgs() <<
"One incomplete PHI is found: " << PN <<
"\n");
1016 bool IsMatchingSCEV = PhiSCEV == Normalized;
1020 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1031 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1034 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1039 if (IsMatchingSCEV) {
1043 AddRecPhiMatch = &PN;
1049 if ((!TruncTy || InvertStep) &&
1053 AddRecPhiMatch = &PN;
1055 TruncTy = Normalized->
getType();
1059 if (AddRecPhiMatch) {
1062 InsertedValues.insert(AddRecPhiMatch);
1064 rememberInstruction(IncV);
1066 ReusedValues.insert(AddRecPhiMatch);
1067 ReusedValues.insert(IncV);
1068 return AddRecPhiMatch;
1073 SCEVInsertPointGuard Guard(Builder,
this);
1083 PostIncLoops.
clear();
1086 assert(
L->getLoopPreheader() &&
1087 "Can't expand add recurrences without a loop preheader!");
1089 expand(Normalized->
getStart(),
L->getLoopPreheader()->getTerminator());
1106 Step = SE.getNegativeSCEV(Step);
1108 Value *StepV = expand(Step,
L->getHeader()->getFirstInsertionPt());
1113 bool IncrementIsNUW = !useSubtract &&
IsIncrementNUW(SE, Normalized);
1114 bool IncrementIsNSW = !useSubtract &&
IsIncrementNSW(SE, Normalized);
1118 Builder.SetInsertPoint(Header, Header->begin());
1120 Builder.CreatePHI(ExpandTy,
pred_size(Header), Twine(IVName) +
".iv");
1125 if (!
L->contains(Pred)) {
1134 IVIncInsertPos : Pred->getTerminator();
1135 Builder.SetInsertPoint(InsertPos);
1136 Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1149 PostIncLoops = SavedPostIncLoops;
1153 InsertedValues.
insert(PN);
1154 InsertedIVs.push_back(PN);
1163 const SCEVAddRecExpr *Normalized = S;
1164 if (PostIncLoops.count(L)) {
1171 [[maybe_unused]]
const SCEV *
Start = Normalized->
getStart();
1173 assert(SE.properlyDominates(Start,
L->getHeader()) &&
1174 "Start does not properly dominate loop header");
1175 assert(SE.dominates(Step,
L->getHeader()) &&
"Step not dominate loop header");
1179 Type *TruncTy =
nullptr;
1180 bool InvertStep =
false;
1181 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1185 if (!PostIncLoops.count(L))
1190 assert(LatchBlock &&
"PostInc mode requires a unique loop latch!");
1199 I->setHasNoUnsignedWrap(
false);
1201 I->setHasNoSignedWrap(
false);
1209 &*Builder.GetInsertPoint())) {
1222 Step = SE.getNegativeSCEV(Step);
1226 SCEVInsertPointGuard Guard(Builder,
this);
1227 StepV = expand(Step,
L->getHeader()->getFirstInsertionPt());
1229 Result = expandIVInc(PN, StepV, L, useSubtract);
1237 if (TruncTy !=
Result->getType())
1238 Result = Builder.CreateTrunc(Result, TruncTy);
1242 Result = Builder.CreateSub(expand(Normalized->
getStart()), Result);
1253 !SE.DT.dominates(EB, Builder.GetInsertBlock()))
1256 for (
auto &PN : EB->
phis()) {
1257 if (!SE.isSCEVable(PN.
getType()))
1259 auto *ExitSCEV = SE.getSCEV(&PN);
1264 ExitSCEV = SE.getPtrToIntExpr(ExitSCEV, STy);
1273 const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
1274 const SCEV *
Op = Diff;
1282 "difference must be of integer type");
1283 Value *DiffV = expand(Diff);
1284 Value *BaseV = fixupLCSSAFormFor(&PN);
1287 return Builder.CreatePtrAdd(BaseV, DiffV);
1288 BaseV = Builder.CreatePtrToInt(BaseV, DiffV->
getType());
1290 return Builder.CreateAdd(BaseV, DiffV);
1308 return expandAddRecExprLiterally(S);
1314 PHINode *CanonicalIV =
nullptr;
1315 if (PHINode *PN =
L->getCanonicalInductionVariable())
1316 if (SE.getTypeSizeInBits(PN->
getType()) >= SE.getTypeSizeInBits(Ty))
1322 SE.getTypeSizeInBits(CanonicalIV->
getType()) > SE.getTypeSizeInBits(Ty) &&
1331 V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
1337 if (
Value *V = tryToReuseLCSSAPhi(S))
1343 Value *StartV = expand(SE.getPointerBase(S));
1344 return expandAddToGEP(SE.removePointerBase(S), StartV,
1349 NewOps[0] = SE.getConstant(Ty, 0);
1350 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1357 const SCEV *AddExprLHS = SE.getUnknown(expand(S->
getStart()));
1358 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1359 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1370 rememberInstruction(CanonicalIV);
1372 SmallPtrSet<BasicBlock *, 4> PredSeen;
1373 Constant *One = ConstantInt::get(Ty, 1);
1376 if (!PredSeen.
insert(HP).second) {
1383 if (
L->contains(HP)) {
1390 rememberInstruction(
Add);
1400 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->
getType()) &&
1401 "IVs with types different from the canonical IV should "
1402 "already have been handled!");
1411 expand(SE.getTruncateOrNoop(
1412 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1421 const SCEV *IH = SE.getUnknown(CanonicalIV);
1424 const SCEV *NewS = S;
1425 const SCEV *
Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->
getType());
1432 const SCEV *
T = SE.getTruncateOrNoop(V, Ty);
1438 return ReuseOrCreateCast(V, S->
getType(), CastInst::PtrToInt,
1439 GetOptimalInsertionPointForCastOf(V));
1444 return Builder.CreateTrunc(V, S->
getType());
1449 return Builder.CreateZExt(V, S->
getType(),
"",
1455 return Builder.CreateSExt(V, S->
getType());
1460 bool IsSequential) {
1461 bool PrevSafeMode = SafeUDivMode;
1462 SafeUDivMode |= IsSequential;
1466 LHS = Builder.CreateFreeze(
LHS);
1468 SafeUDivMode = (IsSequential && i != 0) || PrevSafeMode;
1470 if (IsSequential && i != 0)
1471 RHS = Builder.CreateFreeze(
RHS);
1474 Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {
LHS,
RHS},
1479 Sel = Builder.CreateSelect(ICmp,
LHS,
RHS, Name);
1483 SafeUDivMode = PrevSafeMode;
1488 return expandMinMaxExpr(S, Intrinsic::smax,
"smax");
1492 return expandMinMaxExpr(S, Intrinsic::umax,
"umax");
1496 return expandMinMaxExpr(S, Intrinsic::smin,
"smin");
1500 return expandMinMaxExpr(S, Intrinsic::umin,
"umin");
1504 return expandMinMaxExpr(S, Intrinsic::umin,
"umin",
true);
1508 return Builder.CreateVScale(S->
getType());
1520 Value *V = expand(SH);
1522 if (Ty && Ty != V->getType()) {
1523 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->
getType()) &&
1524 "non-trivial casts should be done with the SCEVs directly!");
1525 V = InsertNoopCastOfTo(V, Ty);
1530Value *SCEVExpander::FindValueInExprValueMap(
1542 for (
Value *V : SE.getSCEVValues(S)) {
1559 DropPoisonGeneratingInsts.
clear();
1570Value *SCEVExpander::expand(
const SCEV *S) {
1577 auto SafeToHoist = [](
const SCEV *S) {
1582 return SC->getValue()->isZero();
1592 if (SafeToHoist(S)) {
1593 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1594 L =
L->getParentLoop()) {
1595 if (SE.isLoopInvariant(S, L)) {
1597 if (BasicBlock *Preheader =
L->getLoopPreheader()) {
1603 InsertPt =
L->getHeader()->getFirstInsertionPt();
1609 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1610 InsertPt =
L->getHeader()->getFirstInsertionPt();
1612 while (InsertPt != Builder.GetInsertPoint() &&
1614 InsertPt = std::next(InsertPt);
1622 auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1623 if (
I != InsertedExpressions.end())
1626 SCEVInsertPointGuard Guard(Builder,
this);
1627 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1631 Value *
V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1634 V = fixupLCSSAFormFor(V);
1636 for (Instruction *
I : DropPoisonGeneratingInsts) {
1638 I->dropPoisonGeneratingAnnotations();
1642 if (
auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1650 auto *Src = NNI->getOperand(0);
1653 DL).value_or(
false))
1654 NNI->setNonNeg(
true);
1664 InsertedExpressions[std::make_pair(S, &*InsertPt)] =
V;
1668void SCEVExpander::rememberInstruction(
Value *
I) {
1669 auto DoInsert = [
this](
Value *
V) {
1670 if (!PostIncLoops.empty())
1671 InsertedPostIncValues.insert(V);
1673 InsertedValues.insert(V);
1680 OrigFlags.try_emplace(
I, PoisonFlags(
I));
1683void SCEVExpander::replaceCongruentIVInc(
1694 if (!OrigInc || !IsomorphicInc)
1701 bool Chained = ChainedPhis.contains(Phi);
1702 if (!(Chained || isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1703 (Chained || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1718 const SCEV *TruncExpr =
1719 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->
getType());
1720 if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1721 !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1724 bool BothHaveNUW =
false;
1725 bool BothHaveNSW =
false;
1728 if (OBOIncV && OBOIsomorphic) {
1730 OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1732 OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1745 "Should only replace an increment with a wider one.");
1746 if (BothHaveNUW || BothHaveNSW) {
1752 dbgs() <<
"INDVARS: Eliminated congruent iv.inc: "
1753 << *IsomorphicInc <<
'\n');
1754 Value *NewInc = OrigInc;
1758 IP = PN->
getParent()->getFirstInsertionPt();
1763 Builder.SetCurrentDebugLocation(IsomorphicInc->
getDebugLoc());
1765 Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->
getType(), IVName);
1790 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1791 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1792 return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1793 LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1796 unsigned NumElim = 0;
1804 if (!SE.isSCEVable(PN->
getType()))
1809 return Const->getValue();
1814 if (
Value *V = SimplifyPHINode(Phi)) {
1815 if (V->getType() != Phi->getType())
1817 SE.forgetValue(Phi);
1818 Phi->replaceAllUsesWith(V);
1822 dbgs() <<
"INDVARS: Eliminated constant iv: " << *Phi
1827 if (!SE.isSCEVable(Phi->getType()))
1830 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1833 if (Phi->getType()->isIntegerTy() &&
TTI &&
1834 TTI->isTruncateFree(Phi->getType(), Phis.
back()->getType())) {
1838 const SCEV *PhiExpr = SE.getSCEV(Phi);
1842 const SCEV *TruncExpr =
1843 SE.getTruncateExpr(PhiExpr, Phis.
back()->getType());
1844 ExprToIVMap[TruncExpr] = Phi;
1855 replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1857 dbgs() <<
"INDVARS: Eliminated congruent iv: " << *Phi
1860 DebugType,
dbgs() <<
"INDVARS: Original iv: " << *OrigPhiRef <<
'\n');
1862 Value *NewIV = OrigPhiRef;
1863 if (OrigPhiRef->
getType() != Phi->getType()) {
1865 L->getHeader()->getFirstInsertionPt());
1866 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1867 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1869 Phi->replaceAllUsesWith(NewIV);
1881 L->getExitingBlocks(ExitingBlocks);
1888 if (!
match(BB->getTerminator(),
1893 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1896 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1905 return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) !=
nullptr;
1916 struct OperationIndices {
1917 OperationIndices(
unsigned Opc,
size_t min,
size_t max) :
1918 Opcode(
Opc), MinIdx(min), MaxIdx(
max) { }
1931 return TTI.getCastInstrCost(Opcode, S->getType(),
1932 S->getOperand(0)->getType(),
1936 auto ArithCost = [&](
unsigned Opcode,
unsigned NumRequired,
1937 unsigned MinIdx = 0,
1940 return NumRequired *
1941 TTI.getArithmeticInstrCost(Opcode, S->getType(),
CostKind);
1944 auto CmpSelCost = [&](
unsigned Opcode,
unsigned NumRequired,
unsigned MinIdx,
1947 Type *OpType = S->getType();
1948 return NumRequired *
TTI.getCmpSelInstrCost(
1953 switch (S->getSCEVType()) {
1961 Cost = CastCost(Instruction::PtrToInt);
1964 Cost = CastCost(Instruction::Trunc);
1967 Cost = CastCost(Instruction::ZExt);
1970 Cost = CastCost(Instruction::SExt);
1973 unsigned Opcode = Instruction::UDiv;
1975 if (SC->getAPInt().isPowerOf2())
1976 Opcode = Instruction::LShr;
1977 Cost = ArithCost(Opcode, 1);
1981 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
1987 Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
1996 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
1997 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
1998 switch (S->getSCEVType()) {
2002 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2003 Cost += ArithCost(Instruction::Or,
2004 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2005 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2010 "Unhandled SCEV expression type?");
2017 unsigned NumRecurrences = S->getNumOperands() - 1;
2018 Cost +=
TTI.getCFInstrCost(Instruction::PHI,
CostKind) * NumRecurrences;
2020 TTI.getArithmeticInstrCost(Instruction::Add, S->getType(),
CostKind) *
2023 Worklist.
emplace_back(Instruction::PHI, 0, S->getOperand(0));
2025 for (
const SCEV *
Op : S->operands().drop_front())
2031 for (
auto &CostOp : Operations) {
2032 for (
auto SCEVOp :
enumerate(S->operands())) {
2034 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2035 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2042bool SCEVExpander::isHighCostExpansionHelper(
2050 const SCEV *S = WorkItem.
S;
2061 L->getHeader()->getParent()->hasMinSize()
2080 return Cost > Budget;
2100 SE.getAddExpr(S, SE.getConstant(S->
getType(), 1)), &At, L))
2115 "Nary expr should have more than 1 operand.");
2120 return Cost > Budget;
2124 "Polynomial should be at least linear");
2127 return Cost > Budget;
2136 switch (Pred->getKind()) {
2151 Value *Expr0 = expand(Pred->getLHS(), IP);
2152 Value *Expr1 = expand(Pred->getRHS(), IP);
2154 Builder.SetInsertPoint(IP);
2156 auto *
I = Builder.CreateICmp(InvPred, Expr0, Expr1,
"ident.check");
2163 "non-affine expression");
2167 const SCEV *ExitCount =
2168 SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->
getLoop(), Pred);
2176 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->
getType());
2177 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2184 Builder.SetInsertPoint(
Loc);
2185 Value *TripCountVal = expand(ExitCount,
Loc);
2190 Value *StepValue = expand(Step,
Loc);
2191 Value *NegStepValue = expand(SE.getNegativeSCEV(Step),
Loc);
2192 Value *StartValue = expand(Start,
Loc);
2197 Builder.SetInsertPoint(
Loc);
2200 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2210 auto ComputeEndCheck = [&]() ->
Value * {
2214 if (!
Signed && Start->isZero() && SE.isKnownPositive(Step) &&
2215 DstBits < SrcBits &&
2216 ExitCount == SE.getZeroExtendExpr(SE.getTruncateExpr(ExitCount, ARTy),
2218 SE.willNotOverflow(Instruction::Mul,
Signed, Step,
2219 SE.getTruncateExpr(ExitCount, ARTy)))
2223 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2225 Value *MulV, *OfMul;
2226 if (Step->
isOne()) {
2230 MulV = TruncTripCount;
2233 CallInst *
Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
2234 {AbsStep, TruncTripCount},
2236 MulV = Builder.CreateExtractValue(
Mul, 0,
"mul.result");
2237 OfMul = Builder.CreateExtractValue(
Mul, 1,
"mul.overflow");
2241 bool NeedPosCheck = !SE.isKnownNegative(Step);
2242 bool NeedNegCheck = !SE.isKnownPositive(Step);
2245 Value *NegMulV = Builder.CreateNeg(MulV);
2247 Add = Builder.CreatePtrAdd(StartValue, MulV);
2249 Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2252 Add = Builder.CreateAdd(StartValue, MulV);
2254 Sub = Builder.CreateSub(StartValue, MulV);
2257 Value *EndCompareLT =
nullptr;
2258 Value *EndCompareGT =
nullptr;
2259 Value *EndCheck =
nullptr;
2261 EndCheck = EndCompareLT = Builder.CreateICmp(
2264 EndCheck = EndCompareGT = Builder.CreateICmp(
2266 if (NeedPosCheck && NeedNegCheck) {
2268 EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2270 return Builder.CreateOr(EndCheck, OfMul);
2272 Value *EndCheck = ComputeEndCheck();
2277 if (SrcBits > DstBits) {
2279 auto *BackedgeCheck =
2281 ConstantInt::get(
Loc->getContext(), MaxVal));
2282 BackedgeCheck = Builder.CreateAnd(
2285 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2294 Value *NSSWCheck =
nullptr, *NUSWCheck =
nullptr;
2304 if (NUSWCheck && NSSWCheck)
2305 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2320 for (
const auto *Pred : Union->getPredicates()) {
2322 Builder.SetInsertPoint(IP);
2327 return Builder.CreateOr(Checks);
2330Value *SCEVExpander::fixupLCSSAFormFor(
Value *V) {
2332 if (!PreserveLCSSA || !DefI)
2338 if (!DefLoop || UseLoop == DefLoop || DefLoop->
contains(UseLoop))
2349 if (DefI->getType()->isIntegerTy())
2355 auto RemoveUserOnExit =
2364 for (
PHINode *PN : InsertedPHIs)
2365 rememberInstruction(PN);
2366 for (
PHINode *PN : PHIsToRemove) {
2369 InsertedValues.erase(PN);
2370 InsertedPostIncValues.erase(PN);
2374 return User->getOperand(0);
2396struct SCEVFindUnsafe {
2397 ScalarEvolution &SE;
2399 bool IsUnsafe =
false;
2401 SCEVFindUnsafe(ScalarEvolution &SE,
bool CanonicalMode)
2402 : SE(SE), CanonicalMode(CanonicalMode) {}
2404 bool follow(
const SCEV *S) {
2414 if (!AR->getLoop()->getLoopPreheader() &&
2415 (!CanonicalMode || !AR->isAffine())) {
2422 bool isDone()
const {
return IsUnsafe; }
2427 SCEVFindUnsafe Search(SE, CanonicalMode);
2429 return !Search.IsUnsafe;
2460 for (
auto [
I, Flags] : Expander.OrigFlags)
2463 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2466 InsertedInstructions);
2476 [&InsertedSet](
Value *U) {
2477 return InsertedSet.contains(cast<Instruction>(U));
2479 "removed instruction should only be used by instructions inserted "
2480 "during expansion");
2482 assert(!
I->getType()->isVoidTy() &&
2483 "inserted instruction should have non-void types");
2485 I->eraseFromParent();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents an incoming formal argument to a Function.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static GEPNoWrapFlags noUnsignedWrap()
static GEPNoWrapFlags none()
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
LoopT * getLoopFor(const BlockT *BB) const
Return the innermost loop that BB lives in.
Represents a single loop in the control flow graph.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
const SCEV * getOperand() const
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
LLVM_ABI Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
LLVM_ABI bool hasRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Determine whether there is an existing expansion of S that can be reused.
SmallVector< Instruction *, 32 > getAllInsertedInstructions() const
Return a vector containing all instructions inserted during expansion.
LLVM_ABI bool isSafeToExpand(const SCEV *S) const
Return true if the given expression is safe to expand in the sense that all materialized values are s...
LLVM_ABI bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const
Return true if the given expression is safe to expand in the sense that all materialized values are d...
LLVM_ABI unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
LLVM_ABI Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI bool hoistIVInc(Instruction *IncV, Instruction *InsertPos, bool RecomputePoisonFlags=false)
Utility for hoisting IncV (with all subexpressions required for its computation) before InsertPos.
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
static LLVM_ABI bool canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi, PHINode *WidePhi, Instruction *OrigInc, Instruction *WideInc)
Return true if both increments directly increment the corresponding IV PHI nodes and have the same op...
LLVM_ABI Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Value * expandCodeFor(const SCEV *SH, Type *Ty, BasicBlock::iterator I)
Insert code to directly compute the specified SCEV expression into the program.
LLVM_ABI Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
LLVM_ABI BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
void setInsertPoint(Instruction *IP)
Set the current insertion point.
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
This class represents a cast from a pointer to a pointer-sized integer value.
This class represents a signed maximum selection.
This class represents a signed minimum selection.
This class represents a sequential/in-order unsigned minimum selection.
This class represents a sign extension of a small integer value to a larger integer value.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned maximum selection.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
The main scalar evolution driver.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
class_match< const SCEVConstant > m_SCEVConstant()
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t > m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVUnaryExpr_match< SCEVPtrToIntExpr, Op0_t > m_scev_PtrToInt(const Op0_t &Op0)
class_match< const SCEV > m_SCEV()
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE, bool CheckInvertible=true)
Normalize S to be post-increment for all loops present in Loops.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
PredIterator< BasicBlock, Value::user_iterator > pred_iterator
constexpr unsigned BitWidth
LLVM_ABI bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
SmallPtrSet< const Loop *, 2 > PostIncLoopSet
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
LLVM_ABI void apply(Instruction *I)
LLVM_ABI PoisonFlags(const Instruction *I)
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
const SCEV * S
The SCEV operand to be costed.
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
int OperandIdx
The use index of an expanded instruction.
Value * visit(const SCEV *S)