83#include "llvm/Config/llvm-config.h"
135using namespace PatternMatch;
136using namespace SCEVPatternMatch;
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
265#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
275 cast<SCEVConstant>(
this)->getValue()->printAsOperand(
OS,
false);
283 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
284 << *PtrToInt->
getType() <<
")";
290 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
297 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
304 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
333 const char *OpStr =
nullptr;
346 OpStr =
" umin_seq ";
374 cast<SCEVUnknown>(
this)->getValue()->printAsOperand(
OS,
false);
377 OS <<
"***COULDNOTCOMPUTE***";
386 return cast<SCEVConstant>(
this)->getType();
388 return cast<SCEVVScale>(
this)->getType();
393 return cast<SCEVCastExpr>(
this)->getType();
395 return cast<SCEVAddRecExpr>(
this)->getType();
397 return cast<SCEVMulExpr>(
this)->getType();
402 return cast<SCEVMinMaxExpr>(
this)->getType();
404 return cast<SCEVSequentialMinMaxExpr>(
this)->getType();
406 return cast<SCEVAddExpr>(
this)->getType();
408 return cast<SCEVUDivExpr>(
this)->getType();
410 return cast<SCEVUnknown>(
this)->getType();
427 return cast<SCEVCastExpr>(
this)->operands();
436 return cast<SCEVNAryExpr>(
this)->operands();
438 return cast<SCEVUDivExpr>(
this)->operands();
453 if (!
Mul)
return false;
457 if (!SC)
return false;
475 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
477 UniqueSCEVs.InsertNode(S, IP);
496 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
499 UniqueSCEVs.InsertNode(S, IP);
519 "Must be a non-bit-width-changing pointer-to-integer cast!");
531 "Cannot truncate non-integer value!");
538 "Cannot zero extend non-integer value!");
545 "Cannot sign extend non-integer value!");
548void SCEVUnknown::deleted() {
550 SE->forgetMemoizedResults(
this);
553 SE->UniqueSCEVs.RemoveNode(
this);
559void SCEVUnknown::allUsesReplacedWith(
Value *New) {
561 SE->forgetMemoizedResults(
this);
564 SE->UniqueSCEVs.RemoveNode(
this);
586 if (LIsPointer != RIsPointer)
587 return (
int)LIsPointer - (int)RIsPointer;
592 return (
int)LID - (int)RID;
595 if (
const auto *LA = dyn_cast<Argument>(LV)) {
596 const auto *
RA = cast<Argument>(RV);
597 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
598 return (
int)LArgNo - (int)RArgNo;
601 if (
const auto *LGV = dyn_cast<GlobalValue>(LV)) {
602 const auto *RGV = cast<GlobalValue>(RV);
604 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
607 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
608 auto LT = GV->getLinkage();
615 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
616 return LGV->getName().compare(RGV->getName());
621 if (
const auto *LInst = dyn_cast<Instruction>(LV)) {
622 const auto *RInst = cast<Instruction>(RV);
627 if (LParent != RParent) {
630 if (LDepth != RDepth)
631 return (
int)LDepth - (int)RDepth;
635 unsigned LNumOps = LInst->getNumOperands(),
636 RNumOps = RInst->getNumOperands();
637 if (LNumOps != RNumOps)
638 return (
int)LNumOps - (int)RNumOps;
640 for (
unsigned Idx :
seq(LNumOps)) {
656static std::optional<int>
666 return (
int)LType - (int)RType;
691 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
692 if (LBitWidth != RBitWidth)
693 return (
int)LBitWidth - (int)RBitWidth;
694 return LA.
ult(
RA) ? -1 : 1;
698 const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(
LHS)->
getType());
699 const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(
RHS)->
getType());
700 return LTy->getBitWidth() - RTy->getBitWidth();
711 if (LLoop != RLoop) {
713 assert(LHead != RHead &&
"Two loops share the same header?");
717 "No dominance between recurrences used by one SCEV?");
740 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
741 if (LNumOps != RNumOps)
742 return (
int)LNumOps - (int)RNumOps;
744 for (
unsigned i = 0; i != LNumOps; ++i) {
769 if (Ops.
size() < 2)
return;
774 return Complexity && *Complexity < 0;
776 if (Ops.
size() == 2) {
780 if (IsLessComplex(
RHS,
LHS))
787 return IsLessComplex(
LHS,
RHS);
794 for (
unsigned i = 0, e = Ops.
size(); i != e-2; ++i) {
795 const SCEV *S = Ops[i];
800 for (
unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
805 if (i == e-2)
return;
827template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
831 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
835 if (
const auto *
C = dyn_cast<SCEVConstant>(
Op)) {
839 Folded = cast<SCEVConstant>(
848 assert(Folded &&
"Must have folded value");
852 if (Folded && IsAbsorber(Folded->
getAPInt()))
856 if (Folded && !IsIdentity(Folded->
getAPInt()))
859 return Ops.
size() == 1 ? Ops[0] :
nullptr;
934 APInt OddFactorial(W, 1);
936 for (
unsigned i = 3; i <= K; ++i) {
939 OddFactorial *= (i >> TwoFactors);
943 unsigned CalculationBits = W +
T;
957 for (
unsigned i = 1; i != K; ++i) {
990 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
995 if (isa<SCEVCouldNotCompute>(Coeff))
1010 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1014 if (!
Op->getType()->isPointerTy())
1025 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1044 if (
auto *U = dyn_cast<SCEVUnknown>(
Op)) {
1049 if (isa<ConstantPointerNull>(U->getValue()))
1055 SCEV *S =
new (SCEVAllocator)
1057 UniqueSCEVs.InsertNode(S, IP);
1062 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1063 "non-SCEVUnknown's.");
1075 class SCEVPtrToIntSinkingRewriter
1083 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1093 return Base::visit(S);
1098 bool Changed =
false;
1108 bool Changed =
false;
1118 "Should only reach pointer-typed SCEVUnknown's.");
1124 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1126 "We must have succeeded in sinking the cast, "
1127 "and ending up with an integer-typed expression!");
1135 if (isa<SCEVCouldNotCompute>(IntOp))
1144 "This is not a truncating conversion!");
1146 "This is not a conversion to a SCEVable type!");
1147 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1155 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1177 UniqueSCEVs.InsertNode(S, IP);
1186 if (isa<SCEVAddExpr>(
Op) || isa<SCEVMulExpr>(
Op)) {
1187 auto *CommOp = cast<SCEVCommutativeExpr>(
Op);
1189 unsigned numTruncs = 0;
1190 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1193 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1194 isa<SCEVTruncateExpr>(S))
1198 if (numTruncs < 2) {
1199 if (isa<SCEVAddExpr>(
Op))
1201 if (isa<SCEVMulExpr>(
Op))
1208 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1215 for (
const SCEV *
Op : AddRec->operands())
1230 UniqueSCEVs.InsertNode(S, IP);
1270struct ExtendOpTraitsBase {
1276template <
typename ExtendOp>
struct ExtendOpTraits {
1292 static const GetExtendExprTy GetExtendExpr;
1294 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1301const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1308 static const GetExtendExprTy GetExtendExpr;
1310 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1317const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1329template <
typename ExtendOpTy>
1332 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1333 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1340 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1349 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1362 auto PreStartFlags =
1380 const SCEV *OperandExtendedStart =
1382 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1383 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1395 const SCEV *OverflowLimit =
1396 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1398 if (OverflowLimit &&
1406template <
typename ExtendOpTy>
1410 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1412 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE,
Depth);
1418 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1453template <
typename ExtendOpTy>
1454bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1457 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1463 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1469 for (
unsigned Delta : {-2, -1, 1, 2}) {
1474 ID.AddPointer(PreStart);
1475 ID.AddPointer(Step);
1483 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1486 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1487 DeltaS, &Pred,
this);
1505 const unsigned BitWidth =
C.getBitWidth();
1523 const APInt &ConstantStart,
1542 auto &UserIDs = FoldCacheUser[
I.first->second];
1543 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1544 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1545 if (UserIDs[
I] ==
ID) {
1550 I.first->second = S;
1552 FoldCacheUser[S].push_back(
ID);
1558 "This is not an extending conversion!");
1560 "This is not a conversion to a SCEVable type!");
1561 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1565 if (
const SCEV *S = FoldCache.lookup(
ID))
1569 if (!isa<SCEVZeroExtendExpr>(S))
1577 "This is not an extending conversion!");
1579 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1596 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1600 UniqueSCEVs.InsertNode(S, IP);
1609 const SCEV *
X = ST->getOperand();
1623 if (AR->isAffine()) {
1624 const SCEV *Start = AR->getStart();
1625 const SCEV *Step = AR->getStepRecurrence(*
this);
1627 const Loop *L = AR->getLoop();
1631 if (AR->hasNoUnsignedWrap()) {
1633 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1647 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1652 const SCEV *CastedMaxBECount =
1656 if (MaxBECount == RecastedMaxBECount) {
1666 const SCEV *WideMaxBECount =
1668 const SCEV *OperandExtendedAdd =
1674 if (ZAdd == OperandExtendedAdd) {
1678 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1685 OperandExtendedAdd =
1691 if (ZAdd == OperandExtendedAdd) {
1696 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1711 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1714 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1716 if (AR->hasNoUnsignedWrap()) {
1722 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1739 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1750 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1751 const APInt &
C = SC->getAPInt();
1755 const SCEV *SResidual =
1764 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1767 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1783 if (
auto *Div = dyn_cast<SCEVUDivExpr>(
Op))
1787 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1789 if (SA->hasNoUnsignedWrap()) {
1793 for (
const auto *
Op : SA->operands())
1806 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1810 const SCEV *SResidual =
1820 if (
auto *SM = dyn_cast<SCEVMulExpr>(
Op)) {
1822 if (SM->hasNoUnsignedWrap()) {
1826 for (
const auto *
Op : SM->operands())
1843 if (SM->getNumOperands() == 2)
1844 if (
auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1845 if (MulLHS->getAPInt().isPowerOf2())
1846 if (
auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1848 MulLHS->getAPInt().logBase2();
1860 if (isa<SCEVUMinExpr>(
Op) || isa<SCEVUMaxExpr>(
Op)) {
1861 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
1863 for (
auto *Operand :
MinMax->operands())
1865 if (isa<SCEVUMinExpr>(
MinMax))
1871 if (
auto *
MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(
Op)) {
1872 assert(isa<SCEVSequentialUMinExpr>(
MinMax) &&
"Not supported!");
1874 for (
auto *Operand :
MinMax->operands())
1881 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1884 UniqueSCEVs.InsertNode(S, IP);
1892 "This is not an extending conversion!");
1894 "This is not a conversion to a SCEVable type!");
1895 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1899 if (
const SCEV *S = FoldCache.lookup(
ID))
1903 if (!isa<SCEVSignExtendExpr>(S))
1911 "This is not an extending conversion!");
1913 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1935 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1940 UniqueSCEVs.InsertNode(S, IP);
1949 const SCEV *
X = ST->getOperand();
1958 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1960 if (SA->hasNoSignedWrap()) {
1964 for (
const auto *
Op : SA->operands())
1978 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1982 const SCEV *SResidual =
1996 if (AR->isAffine()) {
1997 const SCEV *Start = AR->getStart();
1998 const SCEV *Step = AR->getStepRecurrence(*
this);
2000 const Loop *L = AR->getLoop();
2004 if (AR->hasNoSignedWrap()) {
2006 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2020 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2026 const SCEV *CastedMaxBECount =
2030 if (MaxBECount == RecastedMaxBECount) {
2040 const SCEV *WideMaxBECount =
2042 const SCEV *OperandExtendedAdd =
2048 if (SAdd == OperandExtendedAdd) {
2052 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2059 OperandExtendedAdd =
2065 if (SAdd == OperandExtendedAdd) {
2077 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2085 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2087 if (AR->hasNoSignedWrap()) {
2093 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2101 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2102 const APInt &
C = SC->getAPInt();
2106 const SCEV *SResidual =
2115 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2118 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2131 if (isa<SCEVSMinExpr>(
Op) || isa<SCEVSMaxExpr>(
Op)) {
2132 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
2134 for (
auto *Operand :
MinMax->operands())
2136 if (isa<SCEVSMinExpr>(
MinMax))
2143 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2146 UniqueSCEVs.InsertNode(S, IP);
2172 "This is not an extending conversion!");
2174 "This is not a conversion to a SCEVable type!");
2179 if (SC->getAPInt().isNegative())
2184 const SCEV *NewOp =
T->getOperand();
2192 if (!isa<SCEVZeroExtendExpr>(ZExt))
2197 if (!isa<SCEVSignExtendExpr>(SExt))
2203 for (
const SCEV *
Op : AR->operands())
2209 if (isa<SCEVSMaxExpr>(
Op))
2242 APInt &AccumulatedConstant,
2245 bool Interesting =
false;
2249 while (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2252 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2254 AccumulatedConstant += Scale *
C->getAPInt();
2259 for (; i != Ops.
size(); ++i) {
2261 if (
Mul && isa<SCEVConstant>(
Mul->getOperand(0))) {
2263 Scale * cast<SCEVConstant>(
Mul->getOperand(0))->getAPInt();
2264 if (
Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(
Mul->getOperand(1))) {
2269 Add->operands(), NewScale, SE);
2275 auto Pair = M.insert({Key, NewScale});
2279 Pair.first->second += NewScale;
2287 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2288 M.insert({Ops[i], Scale});
2292 Pair.first->second += Scale;
2311 case Instruction::Add:
2314 case Instruction::Sub:
2317 case Instruction::Mul:
2327 auto *NarrowTy = cast<IntegerType>(
LHS->
getType());
2331 const SCEV *
A = (this->*Extension)(
2333 const SCEV *LHSB = (this->*Extension)(
LHS, WideTy, 0);
2334 const SCEV *RHSB = (this->*Extension)(
RHS, WideTy, 0);
2341 auto *RHSC = dyn_cast<SCEVConstant>(
RHS);
2345 APInt C = RHSC->getAPInt();
2346 unsigned NumBits =
C.getBitWidth();
2347 if (BinOp == Instruction::Mul) {
2349 if (
C.isZero() ||
C.isOne())
2358 bool IsSub = (BinOp == Instruction::Sub);
2359 bool IsNegativeConst = (
Signed &&
C.isNegative());
2361 bool OverflowDown = IsSub ^ IsNegativeConst;
2363 if (IsNegativeConst) {
2376 APInt Limit = Min + Magnitude;
2382 APInt Limit = Max - Magnitude;
2387std::optional<SCEV::NoWrapFlags>
2392 return std::nullopt;
2401 bool Deduced =
false;
2403 if (OBO->
getOpcode() != Instruction::Add &&
2406 return std::nullopt;
2415 false,
LHS,
RHS, CtxI)) {
2429 return std::nullopt;
2439 using namespace std::placeholders;
2446 assert(CanAnalyze &&
"don't call from other places!");
2453 auto IsKnownNonNegative = [&](
const SCEV *S) {
2463 if (SignOrUnsignWrap != SignOrUnsignMask &&
2465 isa<SCEVConstant>(Ops[0])) {
2470 return Instruction::Add;
2472 return Instruction::Mul;
2478 const APInt &
C = cast<SCEVConstant>(Ops[0])->getAPInt();
2483 Opcode,
C, OBO::NoSignedWrap);
2491 Opcode,
C, OBO::NoUnsignedWrap);
2501 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2507 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2508 if (UDiv->getOperand(1) == Ops[1])
2510 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2511 if (UDiv->getOperand(1) == Ops[0])
2527 "only nuw or nsw allowed");
2529 if (Ops.
size() == 1)
return Ops[0];
2532 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2534 "SCEVAddExpr operand types don't match!");
2537 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2542 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2543 [](
const APInt &
C) {
return C.isZero(); },
2544 [](
const APInt &
C) {
return false; });
2548 unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
2557 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2562 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2563 Add->setNoWrapFlags(ComputeFlags(Ops));
2570 Type *Ty = Ops[0]->getType();
2571 bool FoundMatch =
false;
2572 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2573 if (Ops[i] == Ops[i+1]) {
2576 while (i+Count != e && Ops[i+Count] == Ops[i])
2581 if (Ops.
size() == Count)
2585 --i; e -= Count - 1;
2595 auto FindTruncSrcType = [&]() ->
Type * {
2600 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[
Idx]))
2601 return T->getOperand()->getType();
2602 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[
Idx])) {
2603 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2604 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2605 return T->getOperand()->getType();
2609 if (
auto *SrcType = FindTruncSrcType()) {
2614 for (
const SCEV *
Op : Ops) {
2616 if (
T->getOperand()->getType() != SrcType) {
2623 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(
Op)) {
2625 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2627 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2628 if (
T->getOperand()->getType() != SrcType) {
2633 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2651 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2656 if (Ops.
size() == 2) {
2660 const SCEV *
A = Ops[0];
2661 const SCEV *
B = Ops[1];
2662 auto *AddExpr = dyn_cast<SCEVAddExpr>(
B);
2663 auto *
C = dyn_cast<SCEVConstant>(
A);
2664 if (AddExpr &&
C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2665 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2666 auto C2 =
C->getAPInt();
2669 APInt ConstAdd = C1 + C2;
2670 auto AddFlags = AddExpr->getNoWrapFlags();
2710 if (Ops.
size() == 2) {
2712 if (
Mul &&
Mul->getNumOperands() == 2 &&
2713 Mul->getOperand(0)->isAllOnesValue()) {
2716 if (matchURem(
Mul->getOperand(1),
X,
Y) &&
X == Ops[1]) {
2728 bool DeletedAdd =
false;
2742 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2758 if (
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx])) {
2765 struct APIntCompare {
2774 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2775 for (
const SCEV *NewOp : NewOps)
2776 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2779 if (AccumulatedConstant != 0)
2781 for (
auto &MulOp : MulOpLists) {
2782 if (MulOp.first == 1) {
2784 }
else if (MulOp.first != 0) {
2793 if (Ops.
size() == 1)
2802 for (;
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx]); ++
Idx) {
2804 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2805 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2806 if (isa<SCEVConstant>(MulOpSCEV))
2808 for (
unsigned AddOp = 0, e = Ops.
size(); AddOp != e; ++AddOp)
2809 if (MulOpSCEV == Ops[AddOp]) {
2811 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2812 if (
Mul->getNumOperands() != 2) {
2816 Mul->operands().take_front(MulOp));
2824 if (Ops.
size() == 2)
return OuterMul;
2837 for (
unsigned OtherMulIdx =
Idx+1;
2838 OtherMulIdx < Ops.
size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2840 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2844 OMulOp != e; ++OMulOp)
2845 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2847 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2848 if (
Mul->getNumOperands() != 2) {
2850 Mul->operands().take_front(MulOp));
2857 OtherMul->
operands().take_front(OMulOp));
2862 const SCEV *InnerMulSum =
2866 if (Ops.
size() == 2)
return OuterMul;
2883 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
2889 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2897 if (!LIOps.
empty()) {
2922 auto *DefI = getDefiningScopeBound(LIOps);
2924 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2936 if (Ops.
size() == 1)
return NewRec;
2939 for (
unsigned i = 0;; ++i)
2940 if (Ops[i] == AddRec) {
2950 for (
unsigned OtherIdx =
Idx+1;
2951 OtherIdx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2956 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2958 "AddRecExprs are not sorted in reverse dominance order?");
2959 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2962 for (; OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2964 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2965 if (OtherAddRec->getLoop() == AddRecLoop) {
2966 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2968 if (i >= AddRecOps.
size()) {
2969 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2973 AddRecOps[i], OtherAddRec->getOperand(i)};
2976 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
2991 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2999 for (
const SCEV *
Op : Ops)
3003 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3007 S =
new (SCEVAllocator)
3009 UniqueSCEVs.InsertNode(S, IP);
3021 for (
const SCEV *
Op : Ops)
3030 S =
new (SCEVAllocator)
3032 UniqueSCEVs.InsertNode(S, IP);
3033 LoopUsers[
L].push_back(S);
3045 for (
const SCEV *
Op : Ops)
3049 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3053 S =
new (SCEVAllocator)
SCEVMulExpr(
ID.Intern(SCEVAllocator),
3055 UniqueSCEVs.InsertNode(S, IP);
3064 if (j > 1 && k / j != i) Overflow =
true;
3080 if (n == 0 || n == k)
return 1;
3081 if (k > n)
return 0;
3087 for (
uint64_t i = 1; i <= k; ++i) {
3088 r =
umul_ov(r, n-(i-1), Overflow);
3097 struct FindConstantInAddMulChain {
3098 bool FoundConstant =
false;
3100 bool follow(
const SCEV *S) {
3101 FoundConstant |= isa<SCEVConstant>(S);
3102 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3105 bool isDone()
const {
3106 return FoundConstant;
3110 FindConstantInAddMulChain
F;
3112 ST.visitAll(StartExpr);
3113 return F.FoundConstant;
3121 "only nuw or nsw allowed");
3123 if (Ops.
size() == 1)
return Ops[0];
3125 Type *ETy = Ops[0]->getType();
3127 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3129 "SCEVMulExpr operand types don't match!");
3134 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3135 [](
const APInt &
C) {
return C.isOne(); },
3136 [](
const APInt &
C) {
return C.isZero(); });
3147 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3152 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3153 Mul->setNoWrapFlags(ComputeFlags(Ops));
3157 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3158 if (Ops.
size() == 2) {
3175 if (Ops[0]->isAllOnesValue()) {
3180 bool AnyFolded =
false;
3181 for (
const SCEV *AddOp :
Add->operands()) {
3184 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3189 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3208 AddRec->getNoWrapFlags(FlagsMask));
3217 if (isa<SCEVConstant>(InnerAdd->
getOperand(0)) &&
3236 bool DeletedMul =
false;
3261 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
3266 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3274 if (!LIOps.
empty()) {
3287 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3303 if (Ops.
size() == 1)
return NewRec;
3306 for (
unsigned i = 0;; ++i)
3307 if (Ops[i] == AddRec) {
3328 bool OpsModified =
false;
3329 for (
unsigned OtherIdx =
Idx+1;
3330 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3333 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3343 bool Overflow =
false;
3349 SmallVector <const SCEV *, 7> SumOps;
3350 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3354 z < ze && !Overflow; ++z) {
3357 if (LargerThan64Bits)
3358 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3360 Coeff = Coeff1*Coeff2;
3375 if (Ops.
size() == 2)
return NewAddRec;
3376 Ops[
Idx] = NewAddRec;
3377 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3379 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3393 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3401 "SCEVURemExpr operand types don't match!");
3406 if (RHSC->getValue()->isOne())
3410 if (RHSC->getAPInt().isPowerOf2()) {
3429 "SCEVUDivExpr operand can't be pointer!");
3431 "SCEVUDivExpr operand types don't match!");
3438 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3446 if (RHSC->getValue()->isOne())
3451 if (!RHSC->getValue()->isZero()) {
3456 unsigned LZ = RHSC->getAPInt().countl_zero();
3460 if (!RHSC->getAPInt().isPowerOf2())
3466 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3468 const APInt &StepInt = Step->getAPInt();
3469 const APInt &DivInt = RHSC->getAPInt();
3470 if (!StepInt.
urem(DivInt) &&
3476 for (
const SCEV *
Op : AR->operands())
3483 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3484 if (StartC && !DivInt.
urem(StepInt) &&
3490 const APInt &StartRem = StartInt.
urem(StepInt);
3491 if (StartRem != 0) {
3492 const SCEV *NewLHS =
3495 if (
LHS != NewLHS) {
3505 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3514 for (
const SCEV *
Op : M->operands())
3518 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3519 const SCEV *
Op = M->getOperand(i);
3521 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) ==
Op) {
3531 if (
auto *DivisorConstant =
3532 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3533 bool Overflow =
false;
3535 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3546 for (
const SCEV *
Op :
A->operands())
3550 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3552 if (isa<SCEVUDivExpr>(
Op) ||
3557 if (
Operands.size() ==
A->getNumOperands())
3564 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3569 if (
const auto *AE = dyn_cast<SCEVAddExpr>(
LHS);
3570 AE && AE->getNumOperands() == 2) {
3571 if (
const auto *VC = dyn_cast<SCEVConstant>(AE->getOperand(0))) {
3572 const APInt &NegC = VC->getAPInt();
3574 const auto *MME = dyn_cast<SCEVSMaxExpr>(AE->getOperand(1));
3575 if (MME && MME->getNumOperands() == 2 &&
3576 isa<SCEVConstant>(MME->getOperand(0)) &&
3577 cast<SCEVConstant>(MME->getOperand(0))->getAPInt() == -NegC &&
3578 MME->getOperand(1) ==
RHS)
3587 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3590 UniqueSCEVs.InsertNode(S, IP);
3620 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3626 if (
const auto *LHSCst = dyn_cast<SCEVConstant>(
Mul->getOperand(0))) {
3627 if (LHSCst == RHSCst) {
3635 APInt Factor =
gcd(LHSCst, RHSCst);
3638 cast<SCEVConstant>(
getConstant(LHSCst->getAPInt().udiv(Factor)));
3640 cast<SCEVConstant>(
getConstant(RHSCst->getAPInt().udiv(Factor)));
3646 Mul = dyn_cast<SCEVMulExpr>(
LHS);
3653 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3654 if (
Mul->getOperand(i) ==
RHS) {
3672 if (
const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3673 if (StepChrec->getLoop() == L) {
3692 "SCEVAddRecExpr operand types don't match!");
3693 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3697 "SCEVAddRecExpr operand is not available at loop entry!");
3715 const Loop *NestedLoop = NestedAR->getLoop();
3716 if (L->contains(NestedLoop)
3721 Operands[0] = NestedAR->getStart();
3725 bool AllInvariant =
all_of(
3737 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3748 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3758 return getOrCreateAddRecExpr(
Operands, L, Flags);
3775 auto *GEPI = dyn_cast<Instruction>(
GEP);
3776 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3787 bool FirstIter =
true;
3789 for (
const SCEV *IndexExpr : IndexExprs) {
3791 if (
StructType *STy = dyn_cast<StructType>(CurTy)) {
3793 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3794 unsigned FieldNo = Index->getZExtValue();
3796 Offsets.push_back(FieldOffset);
3799 CurTy = STy->getTypeAtIndex(Index);
3803 assert(isa<PointerType>(CurTy) &&
3804 "The first index of a GEP indexes a pointer");
3805 CurTy =
GEP->getSourceElementType();
3816 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3817 Offsets.push_back(LocalOffset);
3822 if (Offsets.empty())
3835 "GEP should not change type mid-flight.");
3839SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3842 ID.AddInteger(SCEVType);
3843 for (
const SCEV *
Op : Ops)
3846 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3856 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3857 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
3858 if (Ops.
size() == 1)
return Ops[0];
3861 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
3863 "Operand types don't match!");
3866 "min/max should be consistently pointerish");
3892 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3894 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3899 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3901 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3907 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
3913 while (
Idx < Ops.
size() && Ops[
Idx]->getSCEVType() < Kind)
3919 bool DeletedAny =
false;
3920 while (Ops[
Idx]->getSCEVType() == Kind) {
3940 for (
unsigned i = 0, e = Ops.
size() - 1; i != e; ++i) {
3941 if (Ops[i] == Ops[i + 1] ||
3942 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3948 }
else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3957 if (Ops.
size() == 1)
return Ops[0];
3959 assert(!Ops.
empty() &&
"Reduced smax down to nothing!");
3964 ID.AddInteger(Kind);
3965 for (
const SCEV *
Op : Ops)
3968 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3970 return ExistingSCEV;
3973 SCEV *S =
new (SCEVAllocator)
3976 UniqueSCEVs.InsertNode(S, IP);
3983class SCEVSequentialMinMaxDeduplicatingVisitor final
3984 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3985 std::optional<const SCEV *>> {
3986 using RetVal = std::optional<const SCEV *>;
3994 bool canRecurseInto(
SCEVTypes Kind)
const {
3997 return RootKind == Kind || NonSequentialRootKind == Kind;
4000 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4001 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
4002 "Only for min/max expressions.");
4005 if (!canRecurseInto(Kind))
4008 auto *NAry = cast<SCEVNAryExpr>(S);
4010 bool Changed =
visit(Kind, NAry->operands(), NewOps);
4015 return std::nullopt;
4017 return isa<SCEVSequentialMinMaxExpr>(S)
4024 if (!SeenOps.
insert(S).second)
4025 return std::nullopt;
4026 return Base::visit(S);
4032 : SE(SE), RootKind(RootKind),
4033 NonSequentialRootKind(
4039 bool Changed =
false;
4043 for (
const SCEV *
Op : OrigOps) {
4052 NewOps = std::move(Ops);
4058 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4068 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4070 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4072 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4074 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4077 return visitAnyMinMaxExpr(Expr);
4081 return visitAnyMinMaxExpr(Expr);
4085 return visitAnyMinMaxExpr(Expr);
4089 return visitAnyMinMaxExpr(Expr);
4093 return visitAnyMinMaxExpr(Expr);
4096 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4140struct SCEVPoisonCollector {
4141 bool LookThroughMaybePoisonBlocking;
4143 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4144 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4146 bool follow(
const SCEV *S) {
4147 if (!LookThroughMaybePoisonBlocking &&
4151 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
4157 bool isDone()
const {
return false; }
4167 SCEVPoisonCollector PC1(
true);
4172 if (PC1.MaybePoison.empty())
4178 SCEVPoisonCollector PC2(
false);
4188 SCEVPoisonCollector PC(
false);
4211 while (!Worklist.
empty()) {
4213 if (!Visited.
insert(V).second)
4217 if (Visited.
size() > 16)
4222 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4225 auto *
I = dyn_cast<Instruction>(V);
4232 if (
auto *PDI = dyn_cast<PossiblyDisjointInst>(
I))
4233 if (PDI->isDisjoint())
4239 if (
auto *
II = dyn_cast<IntrinsicInst>(
I);
4240 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4247 if (
I->hasPoisonGeneratingAnnotations())
4258 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4259 "Not a SCEVSequentialMinMaxExpr!");
4260 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
4261 if (Ops.
size() == 1)
4265 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4267 "Operand types don't match!");
4270 "min/max should be consistently pointerish");
4278 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops))
4285 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4286 bool Changed = Deduplicator.visit(Kind, Ops, Ops);
4295 bool DeletedAny =
false;
4297 if (Ops[
Idx]->getSCEVType() != Kind) {
4301 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[
Idx]);
4304 SMME->operands().end());
4312 const SCEV *SaturationPoint;
4323 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4324 if (!isGuaranteedNotToCauseUB(Ops[i]))
4341 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
4350 ID.AddInteger(Kind);
4351 for (
const SCEV *
Op : Ops)
4354 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4356 return ExistingSCEV;
4360 SCEV *S =
new (SCEVAllocator)
4363 UniqueSCEVs.InsertNode(S, IP);
4411 if (
Size.isScalable())
4432 "Cannot get offset for structure containing scalable vector types");
4446 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4447 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4448 "Stale SCEVUnknown in uniquing map!");
4453 FirstUnknown = cast<SCEVUnknown>(S);
4454 UniqueSCEVs.InsertNode(S, IP);
4502 bool PreciseA, PreciseB;
4503 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4504 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4505 if (!PreciseA || !PreciseB)
4508 return (ScopeA == ScopeB) || DT.
dominates(ScopeA, ScopeB) ||
4513 return CouldNotCompute.get();
4516bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4518 auto *SU = dyn_cast<SCEVUnknown>(S);
4519 return SU && SU->getValue() ==
nullptr;
4522 return !ContainsNulls;
4527 if (
I != HasRecMap.
end())
4532 HasRecMap.
insert({S, FoundAddRec});
4540 if (SI == ExprValueMap.
end())
4542 return SI->second.getArrayRef();
4548void ScalarEvolution::eraseValueFromMap(
Value *V) {
4550 if (
I != ValueExprMap.
end()) {
4551 auto EVIt = ExprValueMap.
find(
I->second);
4552 bool Removed = EVIt->second.remove(V);
4554 assert(Removed &&
"Value not in ExprValueMap?");
4559void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4563 auto It = ValueExprMap.
find_as(V);
4564 if (It == ValueExprMap.
end()) {
4566 ExprValueMap[S].
insert(V);
4577 return createSCEVIter(V);
4584 if (
I != ValueExprMap.
end()) {
4585 const SCEV *S =
I->second;
4586 assert(checkValidity(S) &&
4587 "existing SCEV has not been properly invalidated");
4596 if (
const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4600 Type *Ty = V->getType();
4608 if (!
Add ||
Add->getNumOperands() != 2 ||
4609 !
Add->getOperand(0)->isAllOnesValue())
4612 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(
Add->getOperand(1));
4622 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4624 if (
const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4635 return (
const SCEV *)
nullptr;
4641 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4645 Type *Ty = V->getType();
4651 assert(
P->getType()->isPointerTy());
4653 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(
P)) {
4661 if (
auto *
Add = dyn_cast<SCEVAddExpr>(
P)) {
4664 const SCEV **PtrOp =
nullptr;
4665 for (
const SCEV *&AddOp : Ops) {
4666 if (AddOp->getType()->isPointerTy()) {
4667 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4701 const bool RHSIsNotMinSigned =
4732 Type *SrcTy = V->getType();
4734 "Cannot truncate or zero extend with non-integer arguments!");
4744 Type *SrcTy = V->getType();
4746 "Cannot truncate or zero extend with non-integer arguments!");
4756 Type *SrcTy = V->getType();
4758 "Cannot noop or zero extend with non-integer arguments!");
4760 "getNoopOrZeroExtend cannot truncate!");
4768 Type *SrcTy = V->getType();
4770 "Cannot noop or sign extend with non-integer arguments!");
4772 "getNoopOrSignExtend cannot truncate!");
4780 Type *SrcTy = V->getType();
4782 "Cannot noop or any extend with non-integer arguments!");
4784 "getNoopOrAnyExtend cannot truncate!");
4792 Type *SrcTy = V->getType();
4794 "Cannot truncate or noop with non-integer arguments!");
4796 "getTruncateOrNoop cannot extend!");
4804 const SCEV *PromotedLHS =
LHS;
4805 const SCEV *PromotedRHS =
RHS;
4825 assert(!Ops.
empty() &&
"At least one operand must be!");
4827 if (Ops.
size() == 1)
4831 Type *MaxType =
nullptr;
4832 for (
const auto *S : Ops)
4837 assert(MaxType &&
"Failed to find maximum type!");
4841 for (
const auto *S : Ops)
4850 if (!V->getType()->isPointerTy())
4854 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4855 V = AddRec->getStart();
4856 }
else if (
auto *
Add = dyn_cast<SCEVAddExpr>(V)) {
4857 const SCEV *PtrOp =
nullptr;
4858 for (
const SCEV *AddOp :
Add->operands()) {
4859 if (AddOp->getType()->isPointerTy()) {
4860 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4864 assert(PtrOp &&
"Must have pointer op");
4876 for (
User *U :
I->users()) {
4877 auto *UserInsn = cast<Instruction>(U);
4878 if (Visited.
insert(UserInsn).second)
4893 bool IgnoreOtherLoops =
true) {
4896 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4898 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4905 SeenLoopVariantSCEVUnknown =
true;
4913 SeenOtherLoops =
true;
4917 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4919 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4926 bool SeenLoopVariantSCEVUnknown =
false;
4927 bool SeenOtherLoops =
false;
4937 SCEVPostIncRewriter
Rewriter(L, SE);
4939 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4946 SeenLoopVariantSCEVUnknown =
true;
4954 SeenOtherLoops =
true;
4958 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4960 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4967 bool SeenLoopVariantSCEVUnknown =
false;
4968 bool SeenOtherLoops =
false;
4974class SCEVBackedgeConditionFolder
4977 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4979 bool IsPosBECond =
false;
4980 Value *BECond =
nullptr;
4982 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4985 "Both outgoing branches should not target same header!");
4992 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5002 switch (
I->getOpcode()) {
5003 case Instruction::Select: {
5005 std::optional<const SCEV *> Res =
5006 compareWithBackedgeCondition(
SI->getCondition());
5008 bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
5014 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5025 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5028 IsPositiveBECond(IsPosBECond) {}
5030 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5034 Value *BackedgeCond =
nullptr;
5036 bool IsPositiveBECond;
5039std::optional<const SCEV *>
5040SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5045 if (BackedgeCond == IC)
5048 return std::nullopt;
5053 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5074 bool isValid() {
return Valid; }
5087ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5097 if (
const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) {
5099 const APInt &BECountAP = BECountMax->getAPInt();
5100 unsigned NoOverflowBitWidth =
5112 Instruction::Add, IncRange, OBO::NoSignedWrap);
5113 if (NSWRegion.contains(AddRecRange))
5122 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5123 if (NUWRegion.contains(AddRecRange))
5131ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5141 if (!SignedWrapViaInductionTried.insert(AR).second)
5165 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5174 const SCEV *OverflowLimit =
5176 if (OverflowLimit &&
5184ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5194 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5219 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5258 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(
Op)) {
5259 IsNSW = OBO->hasNoSignedWrap();
5260 IsNUW = OBO->hasNoUnsignedWrap();
5264 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5266 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5276 auto *
Op = dyn_cast<Operator>(V);
5278 return std::nullopt;
5284 switch (
Op->getOpcode()) {
5285 case Instruction::Add:
5286 case Instruction::Sub:
5287 case Instruction::Mul:
5288 case Instruction::UDiv:
5289 case Instruction::URem:
5290 case Instruction::And:
5291 case Instruction::AShr:
5292 case Instruction::Shl:
5293 return BinaryOp(
Op);
5295 case Instruction::Or: {
5297 if (cast<PossiblyDisjointInst>(
Op)->isDisjoint())
5298 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5300 return BinaryOp(
Op);
5303 case Instruction::Xor:
5304 if (
auto *RHSC = dyn_cast<ConstantInt>(
Op->getOperand(1)))
5307 if (RHSC->getValue().isSignMask())
5308 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5310 if (V->getType()->isIntegerTy(1))
5311 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5312 return BinaryOp(
Op);
5314 case Instruction::LShr:
5316 if (
ConstantInt *SA = dyn_cast<ConstantInt>(
Op->getOperand(1))) {
5323 if (SA->getValue().ult(
BitWidth)) {
5325 ConstantInt::get(SA->getContext(),
5327 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5330 return BinaryOp(
Op);
5332 case Instruction::ExtractValue: {
5333 auto *EVI = cast<ExtractValueInst>(
Op);
5334 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5337 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
5342 bool Signed = WO->isSigned();
5345 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5350 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5360 if (
auto *
II = dyn_cast<IntrinsicInst>(V))
5361 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5362 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5364 return std::nullopt;
5390 if (
Op == SymbolicPHI)
5395 if (SourceBits != NewBits)
5403 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->
getOperand())
5404 : dyn_cast<SCEVTruncateExpr>(ZExt->
getOperand());
5408 if (
X != SymbolicPHI)
5410 Signed = SExt !=
nullptr;
5418 if (!L || L->getHeader() != PN->
getParent())
5476std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5477ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5483 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
5485 assert(L &&
"Expecting an integer loop header phi");
5490 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5491 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5492 Value *
V = PN->getIncomingValue(i);
5493 if (
L->contains(PN->getIncomingBlock(i))) {
5496 }
else if (BEValueV != V) {
5500 }
else if (!StartValueV) {
5502 }
else if (StartValueV != V) {
5503 StartValueV =
nullptr;
5507 if (!BEValueV || !StartValueV)
5508 return std::nullopt;
5515 const auto *
Add = dyn_cast<SCEVAddExpr>(BEValue);
5517 return std::nullopt;
5521 unsigned FoundIndex =
Add->getNumOperands();
5522 Type *TruncTy =
nullptr;
5524 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5527 if (FoundIndex == e) {
5532 if (FoundIndex ==
Add->getNumOperands())
5533 return std::nullopt;
5537 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5538 if (i != FoundIndex)
5545 return std::nullopt;
5599 const SCEV *PHISCEV =
5609 if (
const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5626 auto getExtendedExpr = [&](
const SCEV *Expr,
5627 bool CreateSignExtend) ->
const SCEV * {
5630 const SCEV *ExtendedExpr =
5633 return ExtendedExpr;
5641 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5642 const SCEV *ExtendedExpr) ->
bool {
5643 return Expr != ExtendedExpr &&
5647 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5648 if (PredIsKnownFalse(StartVal, StartExtended)) {
5650 return std::nullopt;
5655 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5656 if (PredIsKnownFalse(Accum, AccumExtended)) {
5658 return std::nullopt;
5661 auto AppendPredicate = [&](
const SCEV *Expr,
5662 const SCEV *ExtendedExpr) ->
void {
5663 if (Expr != ExtendedExpr &&
5671 AppendPredicate(StartVal, StartExtended);
5672 AppendPredicate(Accum, AccumExtended);
5680 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5681 std::make_pair(NewAR, Predicates);
5683 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5687std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5689 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
5692 return std::nullopt;
5695 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5696 if (
I != PredicatedSCEVRewrites.end()) {
5697 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5700 if (Rewrite.first == SymbolicPHI)
5701 return std::nullopt;
5704 assert(isa<SCEVAddRecExpr>(Rewrite.first) &&
"Expected an AddRec");
5705 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5709 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5710 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5715 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5716 return std::nullopt;
5733 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5734 if (Expr1 != Expr2 &&
5753const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5755 Value *StartValueV) {
5758 assert(BEValueV && StartValueV);
5764 if (BO->Opcode != Instruction::Add)
5767 const SCEV *Accum =
nullptr;
5768 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5770 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5784 insertValueToMap(PN, PHISCEV);
5786 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5789 proveNoWrapViaConstantRanges(AR)));
5795 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5797 "Accum is defined outside L, but is not invariant?");
5798 if (isAddRecNeverPoison(BEInst, L))
5805const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5813 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5819 }
else if (BEValueV != V) {
5823 }
else if (!StartValueV) {
5825 }
else if (StartValueV != V) {
5826 StartValueV =
nullptr;
5830 if (!BEValueV || !StartValueV)
5834 "PHI node already processed?");
5838 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5843 insertValueToMap(PN, SymbolicName);
5857 unsigned FoundIndex =
Add->getNumOperands();
5858 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5859 if (
Add->getOperand(i) == SymbolicName)
5860 if (FoundIndex == e) {
5865 if (FoundIndex !=
Add->getNumOperands()) {
5868 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5869 if (i != FoundIndex)
5870 Ops.
push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5877 (isa<SCEVAddRecExpr>(Accum) &&
5878 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5882 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5889 if (
GEP->getOperand(0) == PN) {
5914 forgetMemoizedResults(SymbolicName);
5915 insertValueToMap(PN, PHISCEV);
5917 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5920 proveNoWrapViaConstantRanges(AR)));
5926 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV))
5945 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5946 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5948 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
5950 if (Start == StartVal) {
5954 forgetMemoizedResults(SymbolicName);
5955 insertValueToMap(PN, Shifted);
5965 eraseValueFromMap(PN);
5985 Use &LeftUse =
Merge->getOperandUse(0);
5986 Use &RightUse =
Merge->getOperandUse(1);
6003const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6020 assert(IDom &&
"At least the entry block should dominate PN");
6029 return createNodeForSelectOrPHI(PN,
Cond, LHS, RHS);
6045ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6049 auto *IncomingInst = dyn_cast<BinaryOperator>(
Incoming);
6057 CommonInst = IncomingInst;
6065 bool SCEVExprsIdentical =
6067 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6068 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6071const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6072 if (
const SCEV *S = createAddRecFromPHI(PN))
6082 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6085 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6094 struct FindClosure {
6095 const SCEV *OperandToFind;
6101 bool canRecurseInto(
SCEVTypes Kind)
const {
6104 return RootKind == Kind || NonSequentialRootKind == Kind ||
6109 : OperandToFind(OperandToFind), RootKind(RootKind),
6110 NonSequentialRootKind(
6114 bool follow(
const SCEV *S) {
6115 Found = S == OperandToFind;
6117 return !isDone() && canRecurseInto(S->
getSCEVType());
6120 bool isDone()
const {
return Found; }
6123 FindClosure FC(OperandToFind, RootKind);
6128std::optional<const SCEV *>
6129ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6139 switch (ICI->getPredicate()) {
6153 bool Signed = ICI->isSigned();
6162 if (LA == LS &&
RA == RS)
6164 if (LA == RS &&
RA == LS)
6167 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6168 if (
Op->getType()->isPointerTy()) {
6170 if (isa<SCEVCouldNotCompute>(
Op))
6179 LS = CoerceOperand(LS);
6180 RS = CoerceOperand(RS);
6181 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
6202 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()) {
6208 if (isa<SCEVConstant>(
C) && cast<SCEVConstant>(
C)->getAPInt().ule(1))
6215 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero() &&
6216 isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->
isZero()) {
6218 while (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(
X))
6219 X = ZExt->getOperand();
6232 return std::nullopt;
6235static std::optional<const SCEV *>
6237 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6241 "Unexpected operands of a select.");
6252 if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
6253 return std::nullopt;
6256 if (isa<SCEVConstant>(TrueExpr)) {
6268static std::optional<const SCEV *>
6271 if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
6272 return std::nullopt;
6275 const auto *SETrue = SE->
getSCEV(TrueVal);
6276 const auto *SEFalse = SE->
getSCEV(FalseVal);
6280const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6282 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6284 V->getType() ==
TrueVal->getType() &&
6285 "Types of select hands and of the result must match.");
6288 if (!
V->getType()->isIntegerTy(1))
6291 if (std::optional<const SCEV *> S =
6303 if (
auto *CI = dyn_cast<ConstantInt>(
Cond))
6304 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6306 if (
auto *
I = dyn_cast<Instruction>(V)) {
6307 if (
auto *ICI = dyn_cast<ICmpInst>(
Cond)) {
6308 if (std::optional<const SCEV *> S =
6309 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6315 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6321 assert(
GEP->getSourceElementType()->isSized() &&
6322 "GEP source element type must be sized");
6325 for (
Value *Index :
GEP->indices())
6330APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S) {
6340 for (
unsigned I = 1, E =
N->getNumOperands();
I < E && Res != 1; ++
I)
6348 return cast<SCEVConstant>(S)->getAPInt();
6358 return GetShiftedByZeros(TZ);
6368 return GetShiftedByZeros(TZ);
6372 if (
M->hasNoUnsignedWrap()) {
6375 for (
const SCEV *Operand :
M->operands().drop_front())
6383 for (
const SCEV *Operand :
M->operands())
6385 return GetShiftedByZeros(TZ);
6390 if (
N->hasNoUnsignedWrap())
6391 return GetGCDMultiple(
N);
6394 for (
const SCEV *Operand :
N->operands().drop_front())
6396 return GetShiftedByZeros(TZ);
6403 return GetGCDMultiple(cast<SCEVNAryExpr>(S));
6409 .countMinTrailingZeros();
6410 return GetShiftedByZeros(Known);
6419 auto I = ConstantMultipleCache.find(S);
6420 if (
I != ConstantMultipleCache.end())
6423 APInt Result = getConstantMultipleImpl(S);
6424 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6425 assert(InsertPair.second &&
"Should insert a new key");
6426 return InsertPair.first->second;
6442 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6444 if (
const auto *CB = dyn_cast<CallBase>(V))
6445 if (std::optional<ConstantRange>
Range = CB->getRange())
6448 if (
auto *
A = dyn_cast<Argument>(V))
6449 if (std::optional<ConstantRange>
Range =
A->getRange())
6452 return std::nullopt;
6459 UnsignedRanges.erase(AddRec);
6460 SignedRanges.erase(AddRec);
6461 ConstantMultipleCache.erase(AddRec);
6466getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6480 auto *
P = dyn_cast<PHINode>(U->getValue());
6492 Value *Start, *Step;
6499 assert(L && L->getHeader() ==
P->getParent());
6512 case Instruction::AShr:
6513 case Instruction::LShr:
6514 case Instruction::Shl:
6529 KnownStep.getBitWidth() ==
BitWidth);
6532 auto MaxShiftAmt = KnownStep.getMaxValue();
6534 bool Overflow =
false;
6535 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6542 case Instruction::AShr: {
6550 if (KnownStart.isNonNegative())
6553 KnownStart.getMaxValue() + 1);
6554 if (KnownStart.isNegative())
6557 KnownEnd.getMaxValue() + 1);
6560 case Instruction::LShr: {
6569 KnownStart.getMaxValue() + 1);
6571 case Instruction::Shl: {
6575 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6577 KnownEnd.getMaxValue() + 1);
6585ScalarEvolution::getRangeRefIter(
const SCEV *S,
6586 ScalarEvolution::RangeSignHint SignHint) {
6588 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6595 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6596 if (!Seen.
insert(Expr).second)
6602 if (!isa<PHINode>(cast<SCEVUnknown>(Expr)->getValue()))
6629 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6630 const SCEV *
P = WorkList[
I];
6631 auto *UnknownS = dyn_cast<SCEVUnknown>(
P);
6634 for (
const SCEV *
Op :
P->operands())
6639 if (
const PHINode *
P = dyn_cast<PHINode>(UnknownS->getValue())) {
6640 if (!PendingPhiRangesIter.insert(
P).second)
6647 if (!WorkList.
empty()) {
6652 getRangeRef(
P, SignHint);
6654 if (
auto *UnknownS = dyn_cast<SCEVUnknown>(
P))
6655 if (
const PHINode *
P = dyn_cast<PHINode>(UnknownS->getValue()))
6656 PendingPhiRangesIter.erase(
P);
6660 return getRangeRef(S, SignHint, 0);
6667 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6669 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6677 if (
I != Cache.
end())
6686 return getRangeRefIter(S, SignHint);
6694 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6698 ConservativeResult =
6721 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6728 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6735 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6740 return setRange(PtrToInt, SignHint,
X);
6745 unsigned WrapType = OBO::AnyWrap;
6746 if (
Add->hasNoSignedWrap())
6747 WrapType |= OBO::NoSignedWrap;
6748 if (
Add->hasNoUnsignedWrap())
6749 WrapType |= OBO::NoUnsignedWrap;
6751 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6753 return setRange(
Add, SignHint,
6754 ConservativeResult.intersectWith(
X, RangeType));
6760 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6761 return setRange(
Mul, SignHint,
6762 ConservativeResult.intersectWith(
X, RangeType));
6768 return setRange(UDiv, SignHint,
6769 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6777 if (!UnsignedMinValue.
isZero())
6778 ConservativeResult = ConservativeResult.intersectWith(
6788 bool AllNonNeg =
true;
6789 bool AllNonPos =
true;
6790 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6797 ConservativeResult = ConservativeResult.intersectWith(
6802 ConservativeResult = ConservativeResult.intersectWith(
6811 const SCEV *MaxBEScev =
6813 if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
6814 APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();
6825 auto RangeFromAffine = getRangeForAffineAR(
6827 ConservativeResult =
6828 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6830 auto RangeFromFactoring = getRangeViaFactoring(
6832 ConservativeResult =
6833 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6839 const SCEV *SymbolicMaxBECount =
6841 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6844 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6845 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6846 ConservativeResult =
6847 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6852 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6862 ID = Intrinsic::umax;
6865 ID = Intrinsic::smax;
6869 ID = Intrinsic::umin;
6872 ID = Intrinsic::smin;
6878 const auto *NAry = cast<SCEVNAryExpr>(S);
6880 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6882 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6883 return setRange(S, SignHint,
6884 ConservativeResult.intersectWith(
X, RangeType));
6893 ConservativeResult =
6894 ConservativeResult.intersectWith(*MDRange, RangeType);
6899 auto CR = getRangeForUnknownRecurrence(U);
6900 ConservativeResult = ConservativeResult.intersectWith(CR);
6911 if (
U->getType()->isPointerTy()) {
6914 unsigned ptrSize =
DL.getPointerTypeSizeInBits(
U->getType());
6915 int ptrIdxDiff = ptrSize -
BitWidth;
6916 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
6929 ConservativeResult = ConservativeResult.intersectWith(
6933 ConservativeResult = ConservativeResult.intersectWith(
6938 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
6941 bool CanBeNull, CanBeFreed;
6943 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
6959 ConservativeResult = ConservativeResult.intersectWith(
6965 if (
PHINode *Phi = dyn_cast<PHINode>(V)) {
6967 if (PendingPhiRanges.insert(Phi).second) {
6970 for (
const auto &
Op :
Phi->operands()) {
6972 RangeFromOps = RangeFromOps.unionWith(OpRange);
6974 if (RangeFromOps.isFullSet())
6977 ConservativeResult =
6978 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6979 bool Erased = PendingPhiRanges.erase(Phi);
6980 assert(Erased &&
"Failed to erase Phi properly?");
6986 if (
const auto *
II = dyn_cast<IntrinsicInst>(V))
6987 if (
II->getIntrinsicID() == Intrinsic::vscale) {
6989 ConservativeResult = ConservativeResult.difference(Disallowed);
6992 return setRange(U, SignHint, std::move(ConservativeResult));
6998 return setRange(S, SignHint, std::move(ConservativeResult));
7007 const APInt &MaxBECount,
7014 if (Step == 0 || MaxBECount == 0)
7021 return ConstantRange::getFull(
BitWidth);
7037 return ConstantRange::getFull(
BitWidth);
7049 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7050 : (StartUpper + std::move(
Offset));
7055 if (StartRange.
contains(MovedBoundary))
7056 return ConstantRange::getFull(
BitWidth);
7059 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7061 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7070 const APInt &MaxBECount) {
7074 "mismatched bit widths");
7083 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7085 StartSRange, MaxBECount,
7097ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7099 ScalarEvolution::RangeSignHint SignHint) {
7100 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7102 "This only works for non-self-wrapping AddRecs!");
7103 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7106 if (!isa<SCEVConstant>(Step))
7107 return ConstantRange::getFull(
BitWidth);
7115 return ConstantRange::getFull(
BitWidth);
7121 MaxItersWithoutWrap))
7122 return ConstantRange::getFull(
BitWidth);
7149 return RangeBetween;
7154 return ConstantRange::getFull(
BitWidth);
7157 isKnownPredicateViaConstantRanges(LEPred, Start,
End))
7158 return RangeBetween;
7160 isKnownPredicateViaConstantRanges(GEPred, Start,
End))
7161 return RangeBetween;
7162 return ConstantRange::getFull(
BitWidth);
7167 const APInt &MaxBECount) {
7174 "mismatched bit widths");
7176 struct SelectPattern {
7177 Value *Condition =
nullptr;
7183 std::optional<unsigned> CastOp;
7196 if (
auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
7197 CastOp = SCast->getSCEVType();
7198 S = SCast->getOperand();
7203 auto *SU = dyn_cast<SCEVUnknown>(S);
7208 Condition =
nullptr;
7240 bool isRecognized() {
return Condition !=
nullptr; }
7243 SelectPattern StartPattern(*
this,
BitWidth, Start);
7244 if (!StartPattern.isRecognized())
7245 return ConstantRange::getFull(
BitWidth);
7247 SelectPattern StepPattern(*
this,
BitWidth, Step);
7248 if (!StepPattern.isRecognized())
7249 return ConstantRange::getFull(
BitWidth);
7251 if (StartPattern.Condition != StepPattern.Condition) {
7255 return ConstantRange::getFull(
BitWidth);
7272 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7274 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7296ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7297 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
7299 if (
auto *U = dyn_cast<SCEVUnknown>(S))
7300 if (
auto *
I = dyn_cast<Instruction>(
U->getValue()))
7312 auto pushOp = [&](
const SCEV *S) {
7313 if (!Visited.
insert(S).second)
7316 if (Visited.
size() > 30) {
7323 for (
const auto *S : Ops)
7327 while (!Worklist.
empty()) {
7329 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7330 if (!Bound || DT.
dominates(Bound, DefI))
7343 return getDefiningScopeBound(Ops, Discard);
7346bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7348 if (
A->getParent() ==
B->getParent() &&
7354 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7355 BLoop->getLoopPreheader() ==
A->getParent() &&
7357 A->getParent()->end()) &&
7364bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7365 SCEVPoisonCollector PC(
true);
7367 return PC.MaybePoison.empty();
7370bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7376 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7380bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7397 for (
const Use &
Op :
I->operands()) {
7403 auto *DefI = getDefiningScopeBound(SCEVOps);
7404 return isGuaranteedToTransferExecutionTo(DefI,
I);
7407bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7409 if (isSCEVExprNeverPoison(
I))
7420 auto *ExitingBB =
L->getExitingBlock();
7433 while (!Worklist.
empty()) {
7437 const Instruction *PoisonUser = cast<Instruction>(
U.getUser());
7443 if (KnownPoison.
insert(PoisonUser).second)
7451ScalarEvolution::LoopProperties
7452ScalarEvolution::getLoopProperties(
const Loop *L) {
7453 using LoopProperties = ScalarEvolution::LoopProperties;
7455 auto Itr = LoopPropertiesCache.find(L);
7456 if (Itr == LoopPropertiesCache.end()) {
7458 if (
auto *SI = dyn_cast<StoreInst>(
I))
7459 return !
SI->isSimple();
7466 if (isa<MemIntrinsic>(
I) && !
I->isVolatile())
7469 return I->mayWriteToMemory();
7472 LoopProperties LP = {
true,
7475 for (
auto *BB :
L->getBlocks())
7476 for (
auto &
I : *BB) {
7478 LP.HasNoAbnormalExits =
false;
7479 if (HasSideEffects(&
I))
7480 LP.HasNoSideEffects =
false;
7481 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7485 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7486 assert(InsertPair.second &&
"We just checked!");
7487 Itr = InsertPair.first;
7500const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7506 Stack.emplace_back(V,
true);
7507 Stack.emplace_back(V,
false);
7508 while (!Stack.empty()) {
7509 auto E = Stack.pop_back_val();
7510 Value *CurV = E.getPointer();
7516 const SCEV *CreatedSCEV =
nullptr;
7519 CreatedSCEV = createSCEV(CurV);
7524 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7528 insertValueToMap(CurV, CreatedSCEV);
7532 Stack.emplace_back(CurV,
true);
7534 Stack.emplace_back(
Op,
false);
7553 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
7555 else if (isa<GlobalAlias>(V))
7557 else if (!isa<ConstantExpr>(V))
7563 bool IsConstArg = isa<ConstantInt>(BO->RHS);
7564 switch (BO->Opcode) {
7565 case Instruction::Add:
7566 case Instruction::Mul: {
7579 dyn_cast<Instruction>(V));
7581 (BO->Opcode == Instruction::Add &&
7582 (NewBO->Opcode != Instruction::Add &&
7583 NewBO->Opcode != Instruction::Sub)) ||
7584 (BO->Opcode == Instruction::Mul &&
7585 NewBO->Opcode != Instruction::Mul)) {
7591 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7592 auto *
I = dyn_cast<Instruction>(BO->
Op);
7602 case Instruction::Sub:
7603 case Instruction::UDiv:
7604 case Instruction::URem:
7606 case Instruction::AShr:
7607 case Instruction::Shl:
7608 case Instruction::Xor:
7612 case Instruction::And:
7613 case Instruction::Or:
7617 case Instruction::LShr:
7629 switch (
U->getOpcode()) {
7630 case Instruction::Trunc:
7631 case Instruction::ZExt:
7632 case Instruction::SExt:
7633 case Instruction::PtrToInt:
7637 case Instruction::BitCast:
7644 case Instruction::SDiv:
7645 case Instruction::SRem:
7650 case Instruction::GetElementPtr:
7651 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() &&
7652 "GEP source element type must be sized");
7656 case Instruction::IntToPtr:
7659 case Instruction::PHI:
7663 case Instruction::Select: {
7665 auto CanSimplifyToUnknown = [
this,
U]() {
7666 if (
U->getType()->isIntegerTy(1) || isa<ConstantInt>(
U->getOperand(0)))
7669 auto *ICI = dyn_cast<ICmpInst>(
U->getOperand(0));
7676 if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()))
7683 if (CanSimplifyToUnknown())
7690 case Instruction::Call:
7691 case Instruction::Invoke:
7692 if (
Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) {
7697 if (
auto *
II = dyn_cast<IntrinsicInst>(U)) {
7698 switch (
II->getIntrinsicID()) {
7699 case Intrinsic::abs:
7702 case Intrinsic::umax:
7703 case Intrinsic::umin:
7704 case Intrinsic::smax:
7705 case Intrinsic::smin:
7706 case Intrinsic::usub_sat:
7707 case Intrinsic::uadd_sat:
7711 case Intrinsic::start_loop_iterations:
7712 case Intrinsic::annotation:
7713 case Intrinsic::ptr_annotation:
7726const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7737 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
7739 else if (isa<GlobalAlias>(V))
7741 else if (!isa<ConstantExpr>(V))
7750 switch (BO->Opcode) {
7751 case Instruction::Add: {
7777 if (BO->Opcode == Instruction::Sub)
7785 if (BO->Opcode == Instruction::Sub)
7791 dyn_cast<Instruction>(V));
7792 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7793 NewBO->Opcode != Instruction::Sub)) {
7803 case Instruction::Mul: {
7823 dyn_cast<Instruction>(V));
7824 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7833 case Instruction::UDiv:
7837 case Instruction::URem:
7841 case Instruction::Sub: {
7844 Flags = getNoWrapFlagsFromUB(BO->
Op);
7849 case Instruction::And:
7852 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7855 if (CI->isMinusOne())
7857 const APInt &
A = CI->getValue();
7863 unsigned LZ =
A.countl_zero();
7864 unsigned TZ =
A.countr_zero();
7869 APInt EffectiveMask =
7871 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7874 const SCEV *ShiftedLHS =
nullptr;
7875 if (
auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
7876 if (
auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
7878 unsigned MulZeros = OpC->getAPInt().countr_zero();
7879 unsigned GCD = std::min(MulZeros, TZ);
7884 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7906 case Instruction::Or:
7915 case Instruction::Xor:
7916 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7918 if (CI->isMinusOne())
7925 if (
auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
7926 if (
ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
7927 if (LBO->getOpcode() == Instruction::And &&
7928 LCI->getValue() == CI->getValue())
7930 dyn_cast<SCEVZeroExtendExpr>(
getSCEV(BO->LHS))) {
7932 const SCEV *Z0 =
Z->getOperand();
7939 if (CI->getValue().isMask(Z0TySize))
7945 APInt Trunc = CI->getValue().
trunc(Z0TySize);
7954 case Instruction::Shl:
7956 if (
ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
7972 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
7986 case Instruction::AShr:
8007 Operator *
L = dyn_cast<Operator>(BO->LHS);
8008 const SCEV *AddTruncateExpr =
nullptr;
8010 const SCEV *AddConstant =
nullptr;
8012 if (L &&
L->getOpcode() == Instruction::Add) {
8018 Operator *LShift = dyn_cast<Operator>(
L->getOperand(0));
8019 ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(
L->getOperand(1));
8020 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8023 ShlAmtCI = dyn_cast<ConstantInt>(LShift->
getOperand(1));
8035 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8041 ShlAmtCI = dyn_cast<ConstantInt>(
L->getOperand(1));
8045 if (AddTruncateExpr && ShlAmtCI) {
8061 const SCEV *CompositeExpr =
8063 if (
L->getOpcode() != Instruction::Shl)
8064 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8073 switch (
U->getOpcode()) {
8074 case Instruction::Trunc:
8077 case Instruction::ZExt:
8080 case Instruction::SExt:
8082 dyn_cast<Instruction>(V))) {
8090 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8091 Type *Ty =
U->getType();
8099 case Instruction::BitCast:
8105 case Instruction::PtrToInt: {
8108 Type *DstIntTy =
U->getType();
8112 if (isa<SCEVCouldNotCompute>(IntOp))
8116 case Instruction::IntToPtr:
8120 case Instruction::SDiv:
8127 case Instruction::SRem:
8134 case Instruction::GetElementPtr:
8135 return createNodeForGEP(cast<GEPOperator>(U));
8137 case Instruction::PHI:
8138 return createNodeForPHI(cast<PHINode>(U));
8140 case Instruction::Select:
8141 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8144 case Instruction::Call:
8145 case Instruction::Invoke:
8146 if (
Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
8149 if (
auto *
II = dyn_cast<IntrinsicInst>(U)) {
8150 switch (
II->getIntrinsicID()) {
8151 case Intrinsic::abs:
8154 cast<ConstantInt>(
II->getArgOperand(1))->
isOne());
8155 case Intrinsic::umax:
8159 case Intrinsic::umin:
8163 case Intrinsic::smax:
8167 case Intrinsic::smin:
8171 case Intrinsic::usub_sat: {
8177 case Intrinsic::uadd_sat: {
8183 case Intrinsic::start_loop_iterations:
8184 case Intrinsic::annotation:
8185 case Intrinsic::ptr_annotation:
8189 case Intrinsic::vscale:
8206 if (isa<SCEVCouldNotCompute>(ExitCount))
8209 auto *ExitCountType = ExitCount->
getType();
8210 assert(ExitCountType->isIntegerTy());
8212 1 + ExitCountType->getScalarSizeInBits());
8219 if (isa<SCEVCouldNotCompute>(ExitCount))
8225 auto CanAddOneWithoutOverflow = [&]() {
8227 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8238 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8268 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8269 assert(L->isLoopExiting(ExitingBlock) &&
8270 "Exiting block must actually branch out of the loop!");
8279 const auto *MaxExitCount =
8287 L->getExitingBlocks(ExitingBlocks);
8289 std::optional<unsigned> Res;
8290 for (
auto *ExitingBB : ExitingBlocks) {
8294 Res = std::gcd(*Res, Multiple);
8296 return Res.value_or(1);
8300 const SCEV *ExitCount) {
8301 if (isa<SCEVCouldNotCompute>(ExitCount))
8330 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8331 assert(L->isLoopExiting(ExitingBlock) &&
8332 "Exiting block must actually branch out of the loop!");
8342 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8344 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8346 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8356 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8359 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8362 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8370 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8377 return getBackedgeTakenInfo(L).getExact(L,
this);
8379 return getBackedgeTakenInfo(L).getConstantMax(
this);
8381 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8388 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8393 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8397 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8407 for (
PHINode &PN : Header->phis())
8408 if (Visited.
insert(&PN).second)
8412ScalarEvolution::BackedgeTakenInfo &
8413ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8414 auto &BTI = getBackedgeTakenInfo(L);
8415 if (BTI.hasFullInfo())
8418 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8421 return Pair.first->second;
8423 BackedgeTakenInfo
Result =
8424 computeBackedgeTakenCount(L,
true);
8426 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8429ScalarEvolution::BackedgeTakenInfo &
8430ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8436 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8437 BackedgeTakenCounts.try_emplace(L);
8439 return Pair.first->second;
8444 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8451 if (
Result.hasAnyInfo()) {
8454 auto LoopUsersIt = LoopUsers.find(L);
8455 if (LoopUsersIt != LoopUsers.end())
8457 forgetMemoizedResults(ToForget);
8460 for (
PHINode &PN :
L->getHeader()->phis())
8461 ConstantEvolutionLoopExitValue.erase(&PN);
8469 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8478 BackedgeTakenCounts.clear();
8479 PredicatedBackedgeTakenCounts.clear();
8480 BECountUsers.clear();
8481 LoopPropertiesCache.clear();
8482 ConstantEvolutionLoopExitValue.clear();
8483 ValueExprMap.
clear();
8484 ValuesAtScopes.clear();
8485 ValuesAtScopesUsers.clear();
8486 LoopDispositions.clear();
8487 BlockDispositions.clear();
8488 UnsignedRanges.clear();
8489 SignedRanges.clear();
8490 ExprValueMap.
clear();
8492 ConstantMultipleCache.clear();
8493 PredicatedSCEVRewrites.clear();
8495 FoldCacheUser.clear();
8497void ScalarEvolution::visitAndClearUsers(
8501 while (!Worklist.
empty()) {
8503 if (!
isSCEVable(
I->getType()) && !isa<WithOverflowInst>(
I))
8508 if (It != ValueExprMap.
end()) {
8509 eraseValueFromMap(It->first);
8511 if (
PHINode *PN = dyn_cast<PHINode>(
I))
8512 ConstantEvolutionLoopExitValue.erase(PN);
8526 while (!LoopWorklist.
empty()) {
8530 forgetBackedgeTakenCounts(CurrL,
false);
8531 forgetBackedgeTakenCounts(CurrL,
true);
8534 for (
auto I = PredicatedSCEVRewrites.begin();
8535 I != PredicatedSCEVRewrites.end();) {
8536 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8537 if (Entry.second == CurrL)
8538 PredicatedSCEVRewrites.erase(
I++);
8543 auto LoopUsersItr = LoopUsers.find(CurrL);
8544 if (LoopUsersItr != LoopUsers.end())
8549 visitAndClearUsers(Worklist, Visited, ToForget);
8551 LoopPropertiesCache.erase(CurrL);
8554 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8556 forgetMemoizedResults(ToForget);
8573 visitAndClearUsers(Worklist, Visited, ToForget);
8575 forgetMemoizedResults(ToForget);
8587 struct InvalidationRootCollector {
8591 InvalidationRootCollector(
Loop *L) : L(L) {}
8593 bool follow(
const SCEV *S) {
8594 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
8595 if (
auto *
I = dyn_cast<Instruction>(SU->getValue()))
8598 }
else if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
8599 if (L->contains(AddRec->
getLoop()))
8604 bool isDone()
const {
return false; }
8607 InvalidationRootCollector
C(L);
8609 forgetMemoizedResults(
C.Roots);
8622 BlockDispositions.clear();
8623 LoopDispositions.clear();
8640 while (!Worklist.
empty()) {
8642 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8643 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8644 if (!LoopDispoRemoved && !BlockDispoRemoved)
8646 auto Users = SCEVUsers.find(Curr);
8647 if (
Users != SCEVUsers.end())
8660const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8664 if (!isComplete() || ExitNotTaken.empty())
8675 for (
const auto &ENT : ExitNotTaken) {
8676 const SCEV *BECount = ENT.ExactNotTaken;
8679 "We should only have known counts for exiting blocks that dominate "
8687 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8688 "Predicate should be always true!");
8697const ScalarEvolution::ExitNotTakenInfo *
8698ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8701 for (
const auto &ENT : ExitNotTaken)
8702 if (ENT.ExitingBlock == ExitingBlock) {
8703 if (ENT.hasAlwaysTruePredicate())
8705 else if (Predicates) {
8715const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8718 if (!getConstantMax())
8721 for (
const auto &ENT : ExitNotTaken)
8722 if (!ENT.hasAlwaysTruePredicate()) {
8728 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
8729 isa<SCEVConstant>(getConstantMax())) &&
8730 "No point in having a non-constant max backedge taken count!");
8731 return getConstantMax();
8734const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8744 for (
const auto &ENT : ExitNotTaken) {
8745 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8746 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
8748 "We should only have known counts for exiting blocks that "
8754 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8755 "Predicate should be always true!");
8758 if (ExitCounts.
empty())
8767bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8769 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8770 return !ENT.hasAlwaysTruePredicate();
8772 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8779 const SCEV *E,
const SCEV *ConstantMaxNotTaken,
8780 const SCEV *SymbolicMaxNotTaken,
bool MaxOrZero,
8782 : ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken),
8783 SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) {
8794 "Exact is not allowed to be less precise than Constant Max");
8797 "Exact is not allowed to be less precise than Symbolic Max");
8800 "Symbolic Max is not allowed to be less precise than Constant Max");
8803 "No point in having a non-constant max backedge taken count!");
8805 for (
const auto PredList : PredLists)
8806 for (
const auto *
P : PredList) {
8809 assert(!isa<SCEVUnionPredicate>(
P) &&
"Only add leaf predicates here!");
8814 "Backedge count should be int");
8817 "Max backedge count should be int");
8821 const SCEV *ConstantMaxNotTaken,
8822 const SCEV *SymbolicMaxNotTaken,
8825 :
ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero,
8830ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8832 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8833 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8834 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8836 ExitNotTaken.reserve(ExitCounts.
size());
8837 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8838 std::back_inserter(ExitNotTaken),
8839 [&](
const EdgeExitInfo &EEI) {
8840 BasicBlock *ExitBB = EEI.first;
8841 const ExitLimit &EL = EEI.second;
8842 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8843 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8846 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
8847 isa<SCEVConstant>(ConstantMax)) &&
8848 "No point in having a non-constant max backedge taken count!");
8852ScalarEvolution::BackedgeTakenInfo
8853ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8854 bool AllowPredicates) {
8856 L->getExitingBlocks(ExitingBlocks);
8858 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8861 bool CouldComputeBECount =
true;
8863 const SCEV *MustExitMaxBECount =
nullptr;
8864 const SCEV *MayExitMaxBECount =
nullptr;
8865 bool MustExitMaxOrZero =
false;
8866 bool IsOnlyExit = ExitingBlocks.
size() == 1;
8875 if (
auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
8876 if (
auto *CI = dyn_cast<ConstantInt>(BI->
getCondition())) {
8878 if (ExitIfTrue == CI->
isZero())
8882 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
8884 assert((AllowPredicates || EL.Predicates.empty()) &&
8885 "Predicated exit limit when predicates are not allowed!");
8890 ++NumExitCountsComputed;
8894 CouldComputeBECount =
false;
8901 "Exact is known but symbolic isn't?");
8902 ++NumExitCountsNotComputed;
8918 if (!MustExitMaxBECount) {
8919 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8920 MustExitMaxOrZero = EL.MaxOrZero;
8923 EL.ConstantMaxNotTaken);
8927 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8930 EL.ConstantMaxNotTaken);
8934 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8938 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8944 for (
const auto &Pair : ExitCounts) {
8945 if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
8946 BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
8947 if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken))
8948 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8949 {L, AllowPredicates});
8951 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8952 MaxBECount, MaxOrZero);
8956ScalarEvolution::computeExitLimit(
const Loop *L,
BasicBlock *ExitingBlock,
8957 bool IsOnlyExit,
bool AllowPredicates) {
8958 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
8962 if (!Latch || !DT.
dominates(ExitingBlock, Latch))
8966 if (
BranchInst *BI = dyn_cast<BranchInst>(Term)) {
8970 "It should have one successor in loop and one exit block!");
8977 if (
SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
8981 if (!
L->contains(SBB)) {
8986 assert(Exit &&
"Exiting block must have at least one exit");
8987 return computeExitLimitFromSingleExitSwitch(
8988 L, SI, Exit, IsOnlyExit);
8995 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
8996 bool AllowPredicates) {
8997 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
8998 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
8999 ControlsOnlyExit, AllowPredicates);
9002std::optional<ScalarEvolution::ExitLimit>
9003ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9004 bool ExitIfTrue,
bool ControlsOnlyExit,
9005 bool AllowPredicates) {
9007 (void)this->ExitIfTrue;
9008 (void)this->AllowPredicates;
9010 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9011 this->AllowPredicates == AllowPredicates &&
9012 "Variance in assumed invariant key components!");
9013 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9014 if (Itr == TripCountMap.end())
9015 return std::nullopt;
9019void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9021 bool ControlsOnlyExit,
9022 bool AllowPredicates,
9023 const ExitLimit &EL) {
9024 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9025 this->AllowPredicates == AllowPredicates &&
9026 "Variance in assumed invariant key components!");
9028 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9029 assert(InsertResult.second &&
"Expected successful insertion!");
9035 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9036 bool ControlsOnlyExit,
bool AllowPredicates) {
9038 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9042 ExitLimit EL = computeExitLimitFromCondImpl(
9043 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9044 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9049 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9050 bool ControlsOnlyExit,
bool AllowPredicates) {
9052 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9053 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9054 return *LimitFromBinOp;
9058 if (
ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
9060 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9061 if (EL.hasFullInfo() || !AllowPredicates)
9065 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9074 if (
ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
9100 auto EL = computeExitLimitFromICmp(L, Pred, LHS,
getConstant(NewRHSC),
9101 ControlsOnlyExit, AllowPredicates);
9102 if (EL.hasAnyInfo())
9107 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9110std::optional<ScalarEvolution::ExitLimit>
9111ScalarEvolution::computeExitLimitFromCondFromBinOp(
9112 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9113 bool ControlsOnlyExit,
bool AllowPredicates) {
9122 return std::nullopt;
9127 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9128 ExitLimit EL0 = computeExitLimitFromCondCached(
9129 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9131 ExitLimit EL1 = computeExitLimitFromCondCached(
9132 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9136 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9137 if (isa<ConstantInt>(Op1))
9138 return Op1 == NeutralElement ? EL0 : EL1;
9139 if (isa<ConstantInt>(Op0))
9140 return Op0 == NeutralElement ? EL1 : EL0;
9145 if (EitherMayExit) {
9146 bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
9155 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9157 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9160 EL1.ConstantMaxNotTaken);
9162 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9164 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9167 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9171 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9172 BECount = EL0.ExactNotTaken;
9181 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
9182 !isa<SCEVCouldNotCompute>(BECount))
9184 if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
9185 SymbolicMaxBECount =
9186 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
9187 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9192 const Loop *L,
ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9193 bool AllowPredicates) {
9205 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
9207 if (EL.hasAnyInfo())
9210 auto *ExhaustiveCount =
9211 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9213 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
9214 return ExhaustiveCount;
9216 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9221 bool ControlsOnlyExit,
bool AllowPredicates) {
9242 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
9243 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
9250 if (!isa<SCEVCouldNotCompute>(Ret))
return Ret;
9262 auto *InnerLHS =
LHS;
9263 if (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
9264 InnerLHS = ZExt->getOperand();
9265 if (
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS);
9301 if (isa<SCEVCouldNotCompute>(LHS))
9306 if (isa<SCEVCouldNotCompute>(RHS))
9309 ExitLimit EL = howFarToZero(
getMinusSCEV(LHS, RHS), L, ControlsOnlyExit,
9311 if (EL.hasAnyInfo())
9319 if (isa<SCEVCouldNotCompute>(LHS))
9324 if (isa<SCEVCouldNotCompute>(RHS))
9327 ExitLimit EL = howFarToNonZero(
getMinusSCEV(LHS, RHS), L);
9328 if (EL.hasAnyInfo())
return EL;
9340 auto *OldType = dyn_cast<IntegerType>(
LHS->
getType());
9360 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9362 if (EL.hasAnyInfo())
9378 ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9380 if (EL.hasAnyInfo())
9392ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9395 bool ControlsOnlyExit) {
9396 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9399 if (
Switch->getDefaultDest() == ExitingBlock)
9403 "Default case must not exit the loop!");
9408 ExitLimit EL = howFarToZero(
getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
9409 if (EL.hasAnyInfo())
9420 assert(isa<SCEVConstant>(Val) &&
9421 "Evaluation of SCEV at constant didn't fold correctly?");
9422 return cast<SCEVConstant>(Val)->getValue();
9435 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9441 auto MatchPositiveShift =
9444 using namespace PatternMatch;
9448 OutOpCode = Instruction::LShr;
9450 OutOpCode = Instruction::AShr;
9452 OutOpCode = Instruction::Shl;
9467 auto MatchShiftRecurrence =
9469 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9484 if (MatchPositiveShift(LHS, V, OpC)) {
9485 PostShiftOpCode = OpC;
9490 PNOut = dyn_cast<PHINode>(LHS);
9491 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9494 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9500 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9507 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9512 if (!MatchShiftRecurrence(LHS, PN, OpCode))
9529 case Instruction::AShr: {
9535 auto *Ty = cast<IntegerType>(
RHS->
getType());
9537 StableValue = ConstantInt::get(Ty, 0);
9539 StableValue = ConstantInt::get(Ty, -1,
true);
9545 case Instruction::LShr:
9546 case Instruction::Shl:
9549 StableValue = ConstantInt::get(cast<IntegerType>(
RHS->
getType()), 0);
9556 "Otherwise cannot be an operand to a branch instruction");
9558 if (
Result->isZeroValue()) {
9560 const SCEV *UpperBound =
9571 if (isa<BinaryOperator>(
I) || isa<CmpInst>(
I) ||
9572 isa<SelectInst>(
I) || isa<CastInst>(
I) || isa<GetElementPtrInst>(
I) ||
9573 isa<LoadInst>(
I) || isa<ExtractValueInst>(
I))
9576 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
9577 if (
const Function *F = CI->getCalledFunction())
9586 if (!L->contains(
I))
return false;
9588 if (isa<PHINode>(
I)) {
9591 return L->getHeader() ==
I->getParent();
9612 if (isa<Constant>(
Op))
continue;
9617 PHINode *
P = dyn_cast<PHINode>(OpInst);
9648 if (
PHINode *PN = dyn_cast<PHINode>(
I))
9665 if (
Constant *
C = dyn_cast<Constant>(V))
return C;
9667 if (!
I)
return nullptr;
9678 if (isa<PHINode>(
I))
return nullptr;
9680 std::vector<Constant*>
Operands(
I->getNumOperands());
9682 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9683 Instruction *Operand = dyn_cast<Instruction>(
I->getOperand(i));
9685 Operands[i] = dyn_cast<Constant>(
I->getOperand(i));
9691 if (!
C)
return nullptr;
9713 if (IncomingVal != CurrentVal) {
9716 IncomingVal = CurrentVal;
9728ScalarEvolution::getConstantEvolutionLoopExitValue(
PHINode *PN,
9731 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9742 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9750 CurrentIterVals[&
PHI] = StartCST;
9752 if (!CurrentIterVals.
count(PN))
9753 return RetVal =
nullptr;
9759 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9762 unsigned IterationNum = 0;
9764 for (; ; ++IterationNum) {
9765 if (IterationNum == NumIterations)
9766 return RetVal = CurrentIterVals[PN];
9775 NextIterVals[PN] = NextPHI;
9777 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9783 for (
const auto &
I : CurrentIterVals) {
9785 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9790 for (
const auto &
I : PHIsToCompute) {
9794 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9797 if (NextPHI !=
I.second)
9798 StoppedEvolving =
false;
9803 if (StoppedEvolving)
9804 return RetVal = CurrentIterVals[PN];
9806 CurrentIterVals.swap(NextIterVals);
9810const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9822 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9825 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9829 CurrentIterVals[&
PHI] = StartCST;
9831 if (!CurrentIterVals.
count(PN))
9839 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9840 auto *CondVal = dyn_cast_or_null<ConstantInt>(
9846 if (CondVal->getValue() ==
uint64_t(ExitWhen)) {
9847 ++NumBruteForceTripCountsComputed;
9858 for (
const auto &
I : CurrentIterVals) {
9860 if (!
PHI ||
PHI->getParent() != Header)
continue;
9865 if (NextPHI)
continue;
9867 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9870 CurrentIterVals.swap(NextIterVals);
9881 for (
auto &LS : Values)
9883 return LS.second ? LS.second : V;
9888 const SCEV *
C = computeSCEVAtScope(V, L);
9889 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9890 if (LS.first == L) {
9892 if (!isa<SCEVConstant>(
C))
9893 ValuesAtScopesUsers[
C].push_back({L, V});
9904 switch (V->getSCEVType()) {
9910 return cast<SCEVConstant>(V)->getValue();
9912 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
9937 assert(!
C->getType()->isPointerTy() &&
9938 "Can only have one pointer, and it must be last");
9965ScalarEvolution::getWithOperands(
const SCEV *S,
9974 auto *AddRec = cast<SCEVAddRecExpr>(S);
9978 return getAddExpr(NewOps, cast<SCEVAddExpr>(S)->getNoWrapFlags());
9980 return getMulExpr(NewOps, cast<SCEVMulExpr>(S)->getNoWrapFlags());
10000const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10001 switch (
V->getSCEVType()) {
10012 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10023 for (++i; i !=
e; ++i)
10028 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
10067 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
10069 if (OpAtScope != Ops[i]) {
10077 for (++i; i !=
e; ++i) {
10082 return getWithOperands(V, NewOps);
10096 if (
PHINode *PN = dyn_cast<PHINode>(
I)) {
10097 const Loop *CurrLoop = this->LI[
I->getParent()];
10108 if (BackedgeTakenCount->
isZero()) {
10109 Value *InitValue =
nullptr;
10110 bool MultipleInitValues =
false;
10116 MultipleInitValues =
true;
10121 if (!MultipleInitValues && InitValue)
10126 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
10130 unsigned InLoopPred =
10136 if (
auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
10141 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10157 bool MadeImprovement =
false;
10172 MadeImprovement |= OrigV != OpV;
10177 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10182 if (!MadeImprovement)
10203const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10205 return stripInjectiveFunctions(ZExt->getOperand());
10207 return stripInjectiveFunctions(SExt->getOperand());
10226 assert(
A != 0 &&
"A must be non-zero.");
10262 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10282static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10288 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10289 << *AddRec <<
'\n');
10292 if (!LC || !MC || !
NC) {
10293 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10294 return std::nullopt;
10300 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10308 N =
N.sext(NewWidth);
10309 M = M.sext(NewWidth);
10310 L = L.sext(NewWidth);
10327 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10328 <<
", multiplied by " <<
T <<
'\n');
10337 std::optional<APInt>
Y) {
10339 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10342 return XW.
slt(YW) ? *
X : *
Y;
10345 return std::nullopt;
10346 return X ? *
X : *
Y;
10363 return std::nullopt;
10364 unsigned W =
X->getBitWidth();
10384static std::optional<APInt>
10390 return std::nullopt;
10393 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10394 std::optional<APInt>
X =
10397 return std::nullopt;
10402 return std::nullopt;
10417static std::optional<APInt>
10421 "Starting value of addrec should be 0");
10422 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10423 <<
Range <<
", addrec " << *AddRec <<
'\n');
10427 "Addrec's initial value should be in range");
10433 return std::nullopt;
10443 auto SolveForBoundary =
10444 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10447 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10448 << Bound <<
" (before multiplying by " << M <<
")\n");
10451 std::optional<APInt> SO;
10454 "signed overflow\n");
10458 "unsigned overflow\n");
10459 std::optional<APInt> UO =
10462 auto LeavesRange = [&] (
const APInt &
X) {
10479 return {std::nullopt,
false};
10484 if (LeavesRange(*Min))
10485 return { Min,
true };
10486 std::optional<APInt> Max = Min == SO ? UO : SO;
10487 if (LeavesRange(*Max))
10488 return { Max,
true };
10491 return {std::nullopt,
true};
10498 auto SL = SolveForBoundary(
Lower);
10499 auto SU = SolveForBoundary(
Upper);
10502 if (!SL.second || !SU.second)
10503 return std::nullopt;
10548 bool ControlsOnlyExit,
10549 bool AllowPredicates) {
10560 if (
C->getValue()->isZero())
return C;
10565 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
10567 if (!AddRec && AllowPredicates)
10573 if (!AddRec || AddRec->
getLoop() != L)
10584 return ExitLimit(R, R, R,
false, Predicates);
10649 return ExitLimit(Distance,
getConstant(MaxBECount), Distance,
false,
10676 const SCEV *SymbolicMax =
10677 isa<SCEVCouldNotCompute>(
Exact) ? ConstantMax :
Exact;
10678 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10682 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
10687 AllowPredicates ? &Predicates :
nullptr, *
this);
10694 auto *S = isa<SCEVCouldNotCompute>(E) ?
M : E;
10695 return ExitLimit(E, M, S,
false, Predicates);
10699ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10707 if (!
C->getValue()->isZero())
10717std::pair<const BasicBlock *, const BasicBlock *>
10718ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10730 return {
L->getLoopPredecessor(),
L->getHeader()};
10732 return {
nullptr, BB};
10741 if (
A ==
B)
return true;
10747 return A->isIdenticalTo(
B) && (isa<BinaryOperator>(
A) || isa<GetElementPtrInst>(
A));
10754 if (
const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
10755 if (
const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
10756 if (ComputesEqualValues(AI, BI))
10765 if (!
Add ||
Add->getNumOperands() != 2)
10767 if (
auto *ME = dyn_cast<SCEVMulExpr>(
Add->getOperand(0));
10768 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10769 LHS =
Add->getOperand(1);
10770 RHS = ME->getOperand(1);
10773 if (
auto *ME = dyn_cast<SCEVMulExpr>(
Add->getOperand(1));
10774 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10775 LHS =
Add->getOperand(0);
10776 RHS = ME->getOperand(1);
10784 bool Changed =
false;
10787 auto TrivialCase = [&](
bool TriviallyTrue) {
10801 return TrivialCase(
false);
10802 return TrivialCase(
true);
10825 const APInt &
RA = RC->getAPInt();
10827 bool SimplifiedByConstantRange =
false;
10832 return TrivialCase(
true);
10834 return TrivialCase(
false);
10843 Changed = SimplifiedByConstantRange =
true;
10847 if (!SimplifiedByConstantRange) {
10864 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10870 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10876 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10882 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
10894 return TrivialCase(
true);
10896 return TrivialCase(
false);
10943 if ((isa<SCEVConstant>(
RHS) ||
10944 (isa<SCEVAddExpr, SCEVAddRecExpr>(
RHS) &&
10945 isa<SCEVConstant>(cast<SCEVNAryExpr>(
RHS)->getOperand(0)))) &&
10994 if (
const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S))
11001 auto NonRecursive = [
this, OrNegative](
const SCEV *S) {
11002 if (
auto *
C = dyn_cast<SCEVConstant>(S))
11003 return C->getAPInt().isPowerOf2() ||
11004 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11007 return isa<SCEVVScale>(S) && F.
hasFnAttribute(Attribute::VScaleRange);
11010 if (NonRecursive(S))
11013 auto *
Mul = dyn_cast<SCEVMulExpr>(S);
11030 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
11035 if (
auto *Cst = dyn_cast<SCEVConstant>(S)) {
11036 APInt C = Cst->getAPInt();
11037 return C.urem(M) == 0;
11044 auto *STy = dyn_cast<IntegerType>(S->
getType());
11045 const SCEV *SmodM =
11060 for (
auto *
A : Assumptions)
11061 if (
A->implies(
P, *
this))
11069std::pair<const SCEV *, const SCEV *>
11072 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11074 return { Start, Start };
11076 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11085 getUsedLoops(
LHS, LoopsUsed);
11086 getUsedLoops(
RHS, LoopsUsed);
11088 if (LoopsUsed.
empty())
11093 for (
const auto *L1 : LoopsUsed)
11094 for (
const auto *L2 : LoopsUsed)
11096 DT.
dominates(L2->getHeader(), L1->getHeader())) &&
11097 "Domination relationship is not a linear order");
11127 SplitRHS.second) &&
11139 if (isKnownPredicateViaSplitting(Pred,
LHS,
RHS))
11143 return isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS);
11153 return std::nullopt;
11168 if (KnownWithoutContext)
11169 return KnownWithoutContext;
11176 return std::nullopt;
11182 const Loop *L =
LHS->getLoop();
11187std::optional<ScalarEvolution::MonotonicPredicateType>
11190 auto Result = getMonotonicPredicateTypeImpl(
LHS, Pred);
11196 auto ResultSwapped =
11199 assert(*ResultSwapped != *Result &&
11200 "monotonicity should flip as we flip the predicate");
11207std::optional<ScalarEvolution::MonotonicPredicateType>
11208ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11222 return std::nullopt;
11226 "Should be greater or less!");
11230 if (!
LHS->hasNoUnsignedWrap())
11231 return std::nullopt;
11235 "Relational predicate is either signed or unsigned!");
11236 if (!
LHS->hasNoSignedWrap())
11237 return std::nullopt;
11239 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11247 return std::nullopt;
11250std::optional<ScalarEvolution::LoopInvariantPredicate>
11257 return std::nullopt;
11264 if (!ArLHS || ArLHS->
getLoop() != L)
11265 return std::nullopt;
11269 return std::nullopt;
11295 return std::nullopt;
11332 return std::nullopt;
11335std::optional<ScalarEvolution::LoopInvariantPredicate>
11340 Pred,
LHS,
RHS, L, CtxI, MaxIter))
11342 if (
auto *
UMin = dyn_cast<SCEVUMinExpr>(MaxIter))
11348 for (
auto *
Op :
UMin->operands())
11352 return std::nullopt;
11355std::optional<ScalarEvolution::LoopInvariantPredicate>
11370 return std::nullopt;
11376 auto *AR = dyn_cast<SCEVAddRecExpr>(
LHS);
11377 if (!AR || AR->
getLoop() != L)
11378 return std::nullopt;
11382 return std::nullopt;
11388 if (Step != One && Step != MinusOne)
11389 return std::nullopt;
11395 return std::nullopt;
11401 return std::nullopt;
11409 if (Step == MinusOne)
11413 return std::nullopt;
11419bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11425 auto CheckRange = [&](
bool IsSigned) {
11428 return RangeLHS.
icmp(Pred, RangeRHS);
11437 if (CheckRange(
true) || CheckRange(
false))
11446bool ScalarEvolution::isKnownPredicateViaNoOverflow(
CmpPredicate Pred,
11453 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11456 const SCEV *XNonConstOp, *XConstOp;
11457 const SCEV *YNonConstOp, *YConstOp;
11461 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11464 XFlagsPresent = ExpectedFlags;
11466 if (!isa<SCEVConstant>(XConstOp))
11469 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11472 YFlagsPresent = ExpectedFlags;
11475 if (YNonConstOp != XNonConstOp)
11478 if (!isa<SCEVConstant>(YConstOp))
11483 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11486 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11490 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
11491 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
11546bool ScalarEvolution::isKnownPredicateViaSplitting(
CmpPredicate Pred,
11569 const SCEV *LHS,
const SCEV *RHS) {
11578 return match(&
I, m_Intrinsic<Intrinsic::experimental_guard>(
11580 isImpliedCond(Pred, LHS, RHS, Condition,
false);
11599 "This cannot be done on broken IR!");
11602 if (isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS))
11611 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11612 isImpliedCond(Pred,
LHS,
RHS,
11614 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11619 if (WalkingBEDominatingConds)
11625 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11626 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11633 const SCEV *LoopCounter =
11644 auto *CI = cast<CallInst>(AssumeVH);
11648 if (isImpliedCond(Pred,
LHS,
RHS, CI->getArgOperand(0),
false))
11652 if (isImpliedViaGuard(Latch, Pred,
LHS,
RHS))
11655 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11656 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11657 assert(DTN &&
"should reach the loop header before reaching the root!");
11660 if (isImpliedViaGuard(BB, Pred,
LHS,
RHS))
11668 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11684 if (isImpliedCond(Pred,
LHS,
RHS, Condition,
11702 "This cannot be done on broken IR!");
11710 const bool ProvingStrictComparison =
11712 bool ProvedNonStrictComparison =
false;
11713 bool ProvedNonEquality =
false;
11716 if (!ProvedNonStrictComparison)
11717 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11718 if (!ProvedNonEquality)
11720 if (ProvedNonStrictComparison && ProvedNonEquality)
11725 if (ProvingStrictComparison) {
11727 return isKnownViaNonRecursiveReasoning(
P,
LHS,
RHS);
11729 if (SplitAndProve(ProofFn))
11734 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11736 if (isImpliedCond(Pred,
LHS,
RHS, Condition,
Inverse, CtxI))
11738 if (ProvingStrictComparison) {
11742 if (SplitAndProve(ProofFn))
11753 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11757 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11758 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11760 dyn_cast<BranchInst>(Pair.first->getTerminator());
11773 auto *CI = cast<CallInst>(AssumeVH);
11777 if (ProveViaCond(CI->getArgOperand(0),
false))
11783 F.
getParent(), Intrinsic::experimental_guard);
11785 for (
const auto *GU : GuardDecl->users())
11786 if (
const auto *Guard = dyn_cast<IntrinsicInst>(GU))
11788 if (ProveViaCond(Guard->getArgOperand(0),
false))
11803 "LHS is not available at Loop Entry");
11805 "RHS is not available at Loop Entry");
11807 if (isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS))
11818 if (FoundCondValue ==
11822 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11826 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11829 const Value *Op0, *Op1;
11832 return isImpliedCond(Pred, LHS, RHS, Op0,
Inverse, CtxI) ||
11833 isImpliedCond(Pred, LHS, RHS, Op1,
Inverse, CtxI);
11836 return isImpliedCond(Pred, LHS, RHS, Op0,
Inverse, CtxI) ||
11837 isImpliedCond(Pred, LHS, RHS, Op1,
Inverse, CtxI);
11840 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
11841 if (!ICI)
return false;
11854 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11859 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11870 auto *WideType = FoundLHS->
getType();
11881 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred.
dropSameSign(),
11882 TruncFoundLHS, TruncFoundRHS, CtxI))
11908 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
11912bool ScalarEvolution::isImpliedCondBalancedTypes(
11917 "Types should be balanced!");
11924 if (FoundLHS == FoundRHS)
11928 if (LHS == FoundRHS || RHS == FoundLHS) {
11929 if (isa<SCEVConstant>(RHS)) {
11940 return isImpliedCondOperands(*
P, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11955 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
11957 LHS, FoundLHS, FoundRHS, CtxI);
11958 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
11959 return isImpliedCondOperands(*
P, LHS, RHS, FoundRHS, FoundLHS, CtxI);
11972 isImpliedCondOperands(*
P, LHS, RHS,
getNotSCEV(FoundLHS),
11981 assert(P1 != P2 &&
"Handled earlier!");
11985 if (IsSignFlippedPredicate(Pred, FoundPred)) {
11990 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11993 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
11994 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
11995 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12000 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12011 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12012 CanonicalRHS, CanonicalFoundLHS,
12013 CanonicalFoundRHS);
12018 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12019 CanonicalRHS, CanonicalFoundLHS,
12020 CanonicalFoundRHS);
12025 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
12028 const SCEV *
V =
nullptr;
12030 if (isa<SCEVConstant>(FoundLHS)) {
12031 C = cast<SCEVConstant>(FoundLHS);
12034 C = cast<SCEVConstant>(FoundRHS);
12046 if (Min ==
C->getAPInt()) {
12051 APInt SharperMin = Min + 1;
12058 if (isImpliedCondOperands(Pred, LHS, RHS, V,
getConstant(SharperMin),
12074 if (isImpliedCondOperands(Pred, LHS, RHS, V,
getConstant(Min), CtxI))
12103 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
12107 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
12110 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS))
12117bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
12120 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
12121 if (!AE || AE->getNumOperands() != 2)
12124 L = AE->getOperand(0);
12125 R = AE->getOperand(1);
12126 Flags = AE->getNoWrapFlags();
12130std::optional<APInt>
12137 APInt DiffMul(BW, 1);
12140 for (
unsigned I = 0;
I < 8; ++
I) {
12145 if (isa<SCEVAddRecExpr>(
Less) && isa<SCEVAddRecExpr>(More)) {
12146 const auto *LAR = cast<SCEVAddRecExpr>(
Less);
12147 const auto *MAR = cast<SCEVAddRecExpr>(More);
12149 if (LAR->getLoop() != MAR->getLoop())
12150 return std::nullopt;
12154 if (!LAR->isAffine() || !MAR->isAffine())
12155 return std::nullopt;
12157 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12158 return std::nullopt;
12160 Less = LAR->getStart();
12161 More = MAR->getStart();
12166 auto MatchConstMul =
12167 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12168 auto *M = dyn_cast<SCEVMulExpr>(S);
12169 if (!M || M->getNumOperands() != 2 ||
12170 !isa<SCEVConstant>(M->getOperand(0)))
12171 return std::nullopt;
12173 {M->getOperand(1), cast<SCEVConstant>(M->getOperand(0))->getAPInt()}};
12175 if (
auto MatchedMore = MatchConstMul(More)) {
12176 if (
auto MatchedLess = MatchConstMul(
Less)) {
12177 if (MatchedMore->second == MatchedLess->second) {
12178 More = MatchedMore->first;
12179 Less = MatchedLess->first;
12180 DiffMul *= MatchedMore->second;
12189 if (
auto *
C = dyn_cast<SCEVConstant>(S)) {
12191 Diff +=
C->getAPInt() * DiffMul;
12194 Diff -=
C->getAPInt() * DiffMul;
12197 Multiplicity[S] +=
Mul;
12199 auto Decompose = [&](
const SCEV *S,
int Mul) {
12200 if (isa<SCEVAddExpr>(S)) {
12206 Decompose(More, 1);
12207 Decompose(
Less, -1);
12211 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12212 for (
const auto &[S,
Mul] : Multiplicity) {
12217 return std::nullopt;
12219 }
else if (
Mul == -1) {
12221 return std::nullopt;
12224 return std::nullopt;
12228 if (NewMore == More || NewLess ==
Less)
12229 return std::nullopt;
12235 if (!More && !
Less)
12239 if (!More || !
Less)
12240 return std::nullopt;
12244 return std::nullopt;
12247bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12267 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
12271 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12275 return isImpliedCondOperands(Pred, LHS, RHS, AR->
getStart(), FoundRHS);
12278 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
12282 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12286 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->
getStart());
12292bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
CmpPredicate Pred,
12295 const SCEV *FoundLHS,
12296 const SCEV *FoundRHS) {
12300 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
12304 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
12305 if (!AddRecFoundLHS)
12312 const Loop *
L = AddRecFoundLHS->getLoop();
12313 if (L != AddRecLHS->getLoop())
12352 if (!RDiff || *LDiff != *RDiff)
12355 if (LDiff->isMinValue())
12358 APInt FoundRHSLimit;
12361 FoundRHSLimit = -(*RDiff);
12373bool ScalarEvolution::isImpliedViaMerge(
CmpPredicate Pred,
const SCEV *LHS,
12374 const SCEV *RHS,
const SCEV *FoundLHS,
12375 const SCEV *FoundRHS,
unsigned Depth) {
12376 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12380 bool Erased = PendingMerges.erase(LPhi);
12381 assert(Erased &&
"Failed to erase LPhi!");
12385 bool Erased = PendingMerges.erase(RPhi);
12386 assert(Erased &&
"Failed to erase RPhi!");
12392 if (
const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
12393 if (
auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
12394 if (!PendingMerges.insert(Phi).second)
12398 if (
const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
12399 if (
auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
12408 if (!PendingMerges.insert(Phi).second)
12414 if (!LPhi && !RPhi)
12425 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12429 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12430 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12431 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12432 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12435 if (RPhi && RPhi->getParent() == LBB) {
12442 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12443 if (!ProvedEasily(L, R))
12454 auto *RLoop = RAR->
getLoop();
12455 auto *Predecessor = RLoop->getLoopPredecessor();
12456 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12458 if (!ProvedEasily(L1, RAR->
getStart()))
12460 auto *Latch = RLoop->getLoopLatch();
12461 assert(Latch &&
"Loop with AddRec with no latch?");
12485 if (!ProvedEasily(L, RHS))
12492bool ScalarEvolution::isImpliedCondOperandsViaShift(
CmpPredicate Pred,
12495 const SCEV *FoundLHS,
12496 const SCEV *FoundRHS) {
12499 if (RHS == FoundRHS) {
12504 if (LHS != FoundLHS)
12507 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
12511 Value *Shiftee, *ShiftValue;
12513 using namespace PatternMatch;
12514 if (
match(SUFoundRHS->getValue(),
12516 auto *ShifteeS =
getSCEV(Shiftee);
12534bool ScalarEvolution::isImpliedCondOperands(
CmpPredicate Pred,
const SCEV *LHS,
12536 const SCEV *FoundLHS,
12537 const SCEV *FoundRHS,
12539 return isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS,
12541 isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS,
12543 isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS) ||
12544 isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
12546 isImpliedCondOperandsHelper(Pred, LHS, RHS, FoundLHS, FoundRHS);
12550template <
typename MinMaxExprType>
12552 const SCEV *Candidate) {
12553 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
12557 return is_contained(MinMaxExpr->operands(), Candidate);
12570 const SCEV *LStart, *RStart, *Step;
12590 const SCEV *LHS,
const SCEV *RHS) {
12601 IsMinMaxConsistingOf<SCEVSMinExpr>(
LHS,
RHS) ||
12603 IsMinMaxConsistingOf<SCEVSMaxExpr>(
RHS,
LHS);
12612 IsMinMaxConsistingOf<SCEVUMinExpr>(
LHS,
RHS) ||
12614 IsMinMaxConsistingOf<SCEVUMaxExpr>(
RHS,
LHS);
12620bool ScalarEvolution::isImpliedViaOperations(
CmpPredicate Pred,
const SCEV *LHS,
12622 const SCEV *FoundLHS,
12623 const SCEV *FoundRHS,
12627 "LHS and RHS have different sizes?");
12630 "FoundLHS and FoundRHS have different sizes?");
12664 auto GetOpFromSExt = [&](
const SCEV *S) {
12665 if (
auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
12666 return Ext->getOperand();
12673 auto *OrigLHS =
LHS;
12674 auto *OrigFoundLHS = FoundLHS;
12675 LHS = GetOpFromSExt(LHS);
12676 FoundLHS = GetOpFromSExt(FoundLHS);
12679 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12682 FoundRHS,
Depth + 1);
12685 if (
auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
12695 if (!LHSAddExpr->hasNoSignedWrap())
12698 auto *LL = LHSAddExpr->getOperand(0);
12699 auto *LR = LHSAddExpr->getOperand(1);
12703 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12704 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2, RHS);
12709 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12711 }
else if (
auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
12726 if (!isa<ConstantInt>(LR))
12729 auto *Denominator = cast<SCEVConstant>(
getSCEV(LR));
12734 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12742 auto *DTy = Denominator->getType();
12743 auto *FRHSTy = FoundRHS->
getType();
12744 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12763 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12774 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12776 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12784 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12817bool ScalarEvolution::isKnownViaNonRecursiveReasoning(
CmpPredicate Pred,
12821 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
12824 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
12827bool ScalarEvolution::isImpliedCondOperandsHelper(
CmpPredicate Pred,
12830 const SCEV *FoundLHS,
12831 const SCEV *FoundRHS) {
12867 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
12873bool ScalarEvolution::isImpliedCondOperandsViaRanges(
12875 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
12876 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
12885 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
12897 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
12900 return LHSRange.
icmp(Pred, ConstRHS);
12903bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *RHS,
const SCEV *Stride,
12916 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
12924 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
12927bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *RHS,
const SCEV *Stride,
12939 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
12947 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
12959const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
12960 const SCEV *Stride,
12987 : APIntOps::umax(One, MinStride);
12991 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13002 : APIntOps::umax(MaxEnd, MinStart);
13009ScalarEvolution::howManyLessThans(
const SCEV *LHS,
const SCEV *RHS,
13010 const Loop *L,
bool IsSigned,
13011 bool ControlsOnlyExit,
bool AllowPredicates) {
13015 bool PredicatedIV =
false;
13017 if (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
13018 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
13020 auto canProveNUW = [&]() {
13023 if (!ControlsOnlyExit)
13044 Limit = Limit.
zext(OuterBitWidth);
13056 Type *Ty = ZExt->getType();
13058 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this, 0),
13060 IV = dyn_cast<SCEVAddRecExpr>(S);
13067 if (!
IV && AllowPredicates) {
13072 PredicatedIV =
true;
13076 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13090 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13093 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13098 if (!PositiveStride) {
13150 auto wouldZeroStrideBeUB = [&]() {
13162 if (!wouldZeroStrideBeUB()) {
13166 }
else if (!NoWrap) {
13169 if (canIVOverflowOnLT(RHS, Stride, IsSigned))
13182 const SCEV *Start =
IV->getStart();
13188 const SCEV *OrigStart = Start;
13190 if (Start->getType()->isPointerTy()) {
13192 if (isa<SCEVCouldNotCompute>(Start))
13197 if (isa<SCEVCouldNotCompute>(RHS))
13201 const SCEV *
End =
nullptr, *BECount =
nullptr,
13202 *BECountIfBackedgeTaken =
nullptr;
13204 const auto *RHSAddRec = dyn_cast<SCEVAddRecExpr>(RHS);
13205 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13206 RHSAddRec->getNoWrapFlags()) {
13219 const SCEV *RHSStart = RHSAddRec->getStart();
13220 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13241 BECountIfBackedgeTaken =
13246 if (BECount ==
nullptr) {
13251 const SCEV *MaxBECount = computeMaxBECountForLT(
13254 MaxBECount,
false , Predicates);
13261 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13288 const SCEV *Numerator =
13294 auto canProveRHSGreaterThanEqualStart = [&]() {
13313 auto *StartMinusOne =
13320 if (canProveRHSGreaterThanEqualStart()) {
13335 BECountIfBackedgeTaken =
13351 bool MayAddOverflow = [&] {
13397 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13411 if (!MayAddOverflow) {
13423 const SCEV *ConstantMaxBECount;
13424 bool MaxOrZero =
false;
13425 if (isa<SCEVConstant>(BECount)) {
13426 ConstantMaxBECount = BECount;
13427 }
else if (BECountIfBackedgeTaken &&
13428 isa<SCEVConstant>(BECountIfBackedgeTaken)) {
13432 ConstantMaxBECount = BECountIfBackedgeTaken;
13435 ConstantMaxBECount = computeMaxBECountForLT(
13439 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
13440 !isa<SCEVCouldNotCompute>(BECount))
13443 const SCEV *SymbolicMaxBECount =
13444 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
13445 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13450 const SCEV *LHS,
const SCEV *RHS,
const Loop *L,
bool IsSigned,
13451 bool ControlsOnlyExit,
bool AllowPredicates) {
13458 if (!
IV && AllowPredicates)
13465 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13469 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13482 if (!Stride->
isOne() && !NoWrap)
13483 if (canIVOverflowOnGT(RHS, Stride, IsSigned))
13486 const SCEV *Start =
IV->getStart();
13498 if (Start->getType()->isPointerTy()) {
13500 if (isa<SCEVCouldNotCompute>(Start))
13503 if (
End->getType()->isPointerTy()) {
13505 if (isa<SCEVCouldNotCompute>(
End))
13533 const SCEV *ConstantMaxBECount =
13534 isa<SCEVConstant>(BECount)
13539 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
13540 ConstantMaxBECount = BECount;
13541 const SCEV *SymbolicMaxBECount =
13542 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
13544 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13554 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
13555 if (!SC->getValue()->isZero()) {
13559 getNoWrapFlags(FlagNW));
13560 if (
const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
13561 return ShiftedAddRec->getNumIterationsInRange(
13569 if (
any_of(operands(), [](
const SCEV *
Op) {
return !isa<SCEVConstant>(
Op); }))
13589 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
13607 "Linear scev computation is off in a bad way!");
13611 if (isQuadratic()) {
13621 assert(getNumOperands() > 1 &&
"AddRec with zero step?");
13631 for (
unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
13637 const SCEV *
Last = getOperand(getNumOperands() - 1);
13638 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13640 return cast<SCEVAddRecExpr>(SE.
getAddRecExpr(Ops, getLoop(),
13647 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
13648 return isa<UndefValue>(SU->
getValue());
13656 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
13665 if (
StoreInst *Store = dyn_cast<StoreInst>(Inst))
13666 Ty = Store->getValueOperand()->getType();
13667 else if (
LoadInst *Load = dyn_cast<LoadInst>(Inst))
13668 Ty = Load->getType();
13680void ScalarEvolution::SCEVCallbackVH::deleted() {
13681 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13682 if (
PHINode *PN = dyn_cast<PHINode>(getValPtr()))
13683 SE->ConstantEvolutionLoopExitValue.erase(PN);
13684 SE->eraseValueFromMap(getValPtr());
13688void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13689 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13708 :
F(
F),
DL(
F.getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13710 LoopDispositions(64), BlockDispositions(64) {
13722 F.getParent(), Intrinsic::experimental_guard);
13723 HasGuards = GuardDecl && !GuardDecl->use_empty();
13727 :
F(Arg.
F),
DL(Arg.
DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13728 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13729 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13730 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13731 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13732 PendingMerges(
std::
move(Arg.PendingMerges)),
13733 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13734 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13735 PredicatedBackedgeTakenCounts(
13736 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13737 BECountUsers(
std::
move(Arg.BECountUsers)),
13738 ConstantEvolutionLoopExitValue(
13739 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13740 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13741 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13742 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13743 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13744 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13745 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13746 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13747 SignedRanges(
std::
move(Arg.SignedRanges)),
13748 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13749 UniquePreds(
std::
move(Arg.UniquePreds)),
13750 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13751 LoopUsers(
std::
move(Arg.LoopUsers)),
13752 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13753 FirstUnknown(Arg.FirstUnknown) {
13754 Arg.FirstUnknown =
nullptr;
13763 Tmp->~SCEVUnknown();
13765 FirstUnknown =
nullptr;
13767 ExprValueMap.
clear();
13768 ValueExprMap.
clear();
13770 BackedgeTakenCounts.clear();
13771 PredicatedBackedgeTakenCounts.clear();
13773 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13774 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13775 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13776 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13777 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13787 if (isa<SCEVConstant>(S))
13799 L->getHeader()->printAsOperand(
OS,
false);
13803 L->getExitingBlocks(ExitingBlocks);
13804 if (ExitingBlocks.
size() != 1)
13805 OS <<
"<multiple exits> ";
13808 if (!isa<SCEVCouldNotCompute>(BTC)) {
13809 OS <<
"backedge-taken count is ";
13812 OS <<
"Unpredictable backedge-taken count.";
13815 if (ExitingBlocks.
size() > 1)
13816 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13817 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13820 if (isa<SCEVCouldNotCompute>(EC)) {
13824 if (!isa<SCEVCouldNotCompute>(EC)) {
13825 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
13828 OS <<
"\n Predicates:\n";
13829 for (
const auto *
P : Predicates)
13837 L->getHeader()->printAsOperand(
OS,
false);
13841 if (!isa<SCEVCouldNotCompute>(ConstantBTC)) {
13842 OS <<
"constant max backedge-taken count is ";
13845 OS <<
", actual taken count either this or zero.";
13847 OS <<
"Unpredictable constant max backedge-taken count. ";
13852 L->getHeader()->printAsOperand(
OS,
false);
13856 if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) {
13857 OS <<
"symbolic max backedge-taken count is ";
13860 OS <<
", actual taken count either this or zero.";
13862 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13866 if (ExitingBlocks.
size() > 1)
13867 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13868 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13872 if (isa<SCEVCouldNotCompute>(ExitBTC)) {
13877 if (!isa<SCEVCouldNotCompute>(ExitBTC)) {
13878 OS <<
"\n predicated symbolic max exit count for "
13879 << ExitingBlock->
getName() <<
": ";
13881 OS <<
"\n Predicates:\n";
13882 for (
const auto *
P : Predicates)
13892 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
13894 L->getHeader()->printAsOperand(
OS,
false);
13896 if (!isa<SCEVCouldNotCompute>(PBT)) {
13897 OS <<
"Predicated backedge-taken count is ";
13900 OS <<
"Unpredictable predicated backedge-taken count.";
13902 OS <<
" Predicates:\n";
13903 for (
const auto *
P : Preds)
13908 auto *PredConstantMax =
13910 if (PredConstantMax != ConstantBTC) {
13912 "different predicated constant max BTC but no predicates");
13914 L->getHeader()->printAsOperand(
OS,
false);
13916 if (!isa<SCEVCouldNotCompute>(PredConstantMax)) {
13917 OS <<
"Predicated constant max backedge-taken count is ";
13920 OS <<
"Unpredictable predicated constant max backedge-taken count.";
13922 OS <<
" Predicates:\n";
13923 for (
const auto *
P : Preds)
13928 auto *PredSymbolicMax =
13930 if (SymbolicBTC != PredSymbolicMax) {
13932 "Different predicated symbolic max BTC, but no predicates");
13934 L->getHeader()->printAsOperand(
OS,
false);
13936 if (!isa<SCEVCouldNotCompute>(PredSymbolicMax)) {
13937 OS <<
"Predicated symbolic max backedge-taken count is ";
13940 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
13942 OS <<
" Predicates:\n";
13943 for (
const auto *
P : Preds)
13949 L->getHeader()->printAsOperand(
OS,
false);
13965 OS <<
"Computable";
13974 OS <<
"DoesNotDominate";
13980 OS <<
"ProperlyDominates";
13997 OS <<
"Classifying expressions for: ";
14006 if (!isa<SCEVCouldNotCompute>(SV)) {
14019 if (!isa<SCEVCouldNotCompute>(AtUse)) {
14028 OS <<
"\t\t" "Exits: ";
14031 OS <<
"<<Unknown>>";
14037 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14039 OS <<
"\t\t" "LoopDispositions: { ";
14045 Iter->getHeader()->printAsOperand(
OS,
false);
14053 OS <<
"\t\t" "LoopDispositions: { ";
14059 InnerL->getHeader()->printAsOperand(
OS,
false);
14070 OS <<
"Determining loop execution counts for: ";
14079 auto &Values = LoopDispositions[S];
14080 for (
auto &V : Values) {
14081 if (V.getPointer() == L)
14086 auto &Values2 = LoopDispositions[S];
14088 if (V.getPointer() == L) {
14097ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14116 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14117 " dominate the contained loop's header?");
14144 bool HasVarying =
false;
14159 if (
auto *
I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
14178 auto &Values = BlockDispositions[S];
14179 for (
auto &V : Values) {
14180 if (V.getPointer() == BB)
14185 auto &Values2 = BlockDispositions[S];
14187 if (V.getPointer() == BB) {
14196ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14225 bool Proper =
true;
14237 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
14238 if (
I->getParent() == BB)
14263void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14266 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14267 auto It = BECounts.find(L);
14268 if (It != BECounts.end()) {
14269 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14270 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14271 if (!isa<SCEVConstant>(S)) {
14272 auto UserIt = BECountUsers.find(S);
14273 assert(UserIt != BECountUsers.end());
14278 BECounts.erase(It);
14286 while (!Worklist.
empty()) {
14288 auto Users = SCEVUsers.find(Curr);
14289 if (
Users != SCEVUsers.end())
14295 for (
const auto *S : ToForget)
14296 forgetMemoizedResultsImpl(S);
14298 for (
auto I = PredicatedSCEVRewrites.begin();
14299 I != PredicatedSCEVRewrites.end();) {
14300 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14301 if (ToForget.count(
Entry.first))
14302 PredicatedSCEVRewrites.erase(
I++);
14308void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14309 LoopDispositions.erase(S);
14310 BlockDispositions.erase(S);
14311 UnsignedRanges.erase(S);
14312 SignedRanges.erase(S);
14313 HasRecMap.
erase(S);
14314 ConstantMultipleCache.erase(S);
14316 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
14317 UnsignedWrapViaInductionTried.erase(AR);
14318 SignedWrapViaInductionTried.erase(AR);
14321 auto ExprIt = ExprValueMap.
find(S);
14322 if (ExprIt != ExprValueMap.
end()) {
14323 for (
Value *V : ExprIt->second) {
14324 auto ValueIt = ValueExprMap.
find_as(V);
14325 if (ValueIt != ValueExprMap.
end())
14326 ValueExprMap.
erase(ValueIt);
14328 ExprValueMap.
erase(ExprIt);
14331 auto ScopeIt = ValuesAtScopes.find(S);
14332 if (ScopeIt != ValuesAtScopes.end()) {
14333 for (
const auto &Pair : ScopeIt->second)
14334 if (!isa_and_nonnull<SCEVConstant>(Pair.second))
14336 std::make_pair(Pair.first, S));
14337 ValuesAtScopes.erase(ScopeIt);
14340 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14341 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14342 for (
const auto &Pair : ScopeUserIt->second)
14343 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14344 ValuesAtScopesUsers.erase(ScopeUserIt);
14347 auto BEUsersIt = BECountUsers.find(S);
14348 if (BEUsersIt != BECountUsers.end()) {
14350 auto Copy = BEUsersIt->second;
14351 for (
const auto &Pair : Copy)
14352 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14353 BECountUsers.erase(BEUsersIt);
14356 auto FoldUser = FoldCacheUser.find(S);
14357 if (FoldUser != FoldCacheUser.end())
14358 for (
auto &KV : FoldUser->second)
14359 FoldCache.erase(KV);
14360 FoldCacheUser.erase(S);
14364ScalarEvolution::getUsedLoops(
const SCEV *S,
14366 struct FindUsedLoops {
14368 : LoopsUsed(LoopsUsed) {}
14370 bool follow(
const SCEV *S) {
14371 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S))
14376 bool isDone()
const {
return false; }
14379 FindUsedLoops F(LoopsUsed);
14383void ScalarEvolution::getReachableBlocks(
14387 while (!Worklist.
empty()) {
14389 if (!Reachable.
insert(BB).second)
14396 if (
auto *
C = dyn_cast<ConstantInt>(
Cond)) {
14397 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14401 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond)) {
14404 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14408 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14443 SCEVMapper SCM(SE2);
14445 SE2.getReachableBlocks(ReachableBlocks,
F);
14447 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14465 while (!LoopStack.
empty()) {
14471 if (!ReachableBlocks.
contains(L->getHeader()))
14476 auto It = BackedgeTakenCounts.find(L);
14477 if (It == BackedgeTakenCounts.end())
14481 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14501 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14502 if (Delta && !Delta->
isZero()) {
14503 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14504 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14505 dbgs() <<
"New: " << *NewBECount <<
"\n";
14506 dbgs() <<
"Delta: " << *Delta <<
"\n";
14514 while (!Worklist.
empty()) {
14516 if (ValidLoops.
insert(L).second)
14517 Worklist.
append(L->begin(), L->end());
14519 for (
const auto &KV : ValueExprMap) {
14522 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
14524 "AddRec references invalid loop");
14529 auto It = ExprValueMap.
find(KV.second);
14530 if (It == ExprValueMap.
end() || !It->second.contains(KV.first)) {
14531 dbgs() <<
"Value " << *KV.first
14532 <<
" is in ValueExprMap but not in ExprValueMap\n";
14536 if (
auto *
I = dyn_cast<Instruction>(&*KV.first)) {
14537 if (!ReachableBlocks.
contains(
I->getParent()))
14539 const SCEV *OldSCEV = SCM.visit(KV.second);
14541 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14542 if (Delta && !Delta->
isZero()) {
14543 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14544 <<
"Old: " << *OldSCEV <<
"\n"
14545 <<
"New: " << *NewSCEV <<
"\n"
14546 <<
"Delta: " << *Delta <<
"\n";
14552 for (
const auto &KV : ExprValueMap) {
14553 for (
Value *V : KV.second) {
14554 const SCEV *S = ValueExprMap.lookup(V);
14556 dbgs() <<
"Value " << *V
14557 <<
" is in ExprValueMap but not in ValueExprMap\n";
14560 if (S != KV.first) {
14561 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14562 << *KV.first <<
"\n";
14569 for (
const auto &S : UniqueSCEVs) {
14572 if (isa<SCEVConstant>(
Op))
14574 auto It = SCEVUsers.find(
Op);
14575 if (It != SCEVUsers.end() && It->second.count(&S))
14577 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14578 <<
" is not being tracked!\n";
14584 for (
const auto &ValueAndVec : ValuesAtScopes) {
14586 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14587 const Loop *L = LoopAndValueAtScope.first;
14588 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14589 if (!isa<SCEVConstant>(ValueAtScope)) {
14590 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14591 if (It != ValuesAtScopesUsers.end() &&
14594 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14595 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14601 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14602 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14603 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14604 const Loop *L = LoopAndValue.first;
14605 const SCEV *
Value = LoopAndValue.second;
14607 auto It = ValuesAtScopes.find(
Value);
14608 if (It != ValuesAtScopes.end() &&
14609 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14611 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14612 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14618 auto VerifyBECountUsers = [&](
bool Predicated) {
14620 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14621 for (
const auto &LoopAndBEInfo : BECounts) {
14622 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14623 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14624 if (!isa<SCEVConstant>(S)) {
14625 auto UserIt = BECountUsers.find(S);
14626 if (UserIt != BECountUsers.end() &&
14627 UserIt->second.contains({ LoopAndBEInfo.first,
Predicated }))
14629 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14630 <<
" missing from BECountUsers\n";
14637 VerifyBECountUsers(
false);
14638 VerifyBECountUsers(
true);
14641 for (
auto &[S, Values] : LoopDispositions) {
14642 for (
auto [
Loop, CachedDisposition] : Values) {
14644 if (CachedDisposition != RecomputedDisposition) {
14645 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14646 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14647 << RecomputedDisposition <<
"\n";
14654 for (
auto &[S, Values] : BlockDispositions) {
14655 for (
auto [BB, CachedDisposition] : Values) {
14657 if (CachedDisposition != RecomputedDisposition) {
14658 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14659 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14660 <<
", actual " << RecomputedDisposition <<
"\n";
14667 for (
auto [
FoldID, Expr] : FoldCache) {
14668 auto I = FoldCacheUser.find(Expr);
14669 if (
I == FoldCacheUser.end()) {
14670 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14675 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14679 for (
auto [Expr, IDs] : FoldCacheUser) {
14680 for (
auto &
FoldID : IDs) {
14683 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14688 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14689 <<
" != " << *Expr <<
"!\n";
14700 for (
auto [S, Multiple] : ConstantMultipleCache) {
14702 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14703 Multiple.
urem(RecomputedMultiple) != 0 &&
14704 RecomputedMultiple.
urem(Multiple) != 0)) {
14705 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14706 << *S <<
" : Computed " << RecomputedMultiple
14707 <<
" but cache contains " << Multiple <<
"!\n";
14747 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14748 <<
F.getName() <<
"':\n";
14754 "Scalar Evolution Analysis",
false,
true)
14768 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F),
14769 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F),
14770 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
14771 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
14803 const SCEV *LHS,
const SCEV *RHS) {
14806 "Type mismatch between LHS and RHS");
14809 ID.AddInteger(Pred);
14810 ID.AddPointer(
LHS);
14811 ID.AddPointer(
RHS);
14812 void *IP =
nullptr;
14813 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14817 UniquePreds.InsertNode(Eq, IP);
14828 ID.AddInteger(AddedFlags);
14829 void *IP =
nullptr;
14830 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14832 auto *OF =
new (SCEVAllocator)
14834 UniquePreds.InsertNode(OF, IP);
14854 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14860 if (
auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
14861 for (
const auto *Pred : U->getPredicates())
14862 if (
const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
14863 if (IPred->getLHS() == Expr &&
14864 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14865 return IPred->getRHS();
14866 }
else if (
const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) {
14867 if (IPred->getLHS() == Expr &&
14868 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14869 return IPred->getRHS();
14872 return convertToAddRecWithPreds(Expr);
14908 explicit SCEVPredicateRewriter(
14917 return Pred && Pred->
implies(
P, SE);
14926 return addOverflowAssumption(
A);
14936 if (!isa<PHINode>(Expr->
getValue()))
14939 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
14941 if (!PredicatedRewrite)
14943 for (
const auto *
P : PredicatedRewrite->second){
14945 if (
auto *WP = dyn_cast<const SCEVWrapPredicate>(
P)) {
14946 if (L != WP->getExpr()->getLoop())
14949 if (!addOverflowAssumption(
P))
14952 return PredicatedRewrite->first;
14965 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
14972 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
14973 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
14982 auto *WrapPred = dyn_cast<SCEVWrapPredicate>(Pred);
14988 if (isa<SCEVCouldNotCompute>(ExitCount))
14992 if (!Step->
isOne())
15011 : FastID(
ID), Kind(Kind) {}
15018 assert(
LHS !=
RHS &&
"LHS and RHS are the same SCEV");
15023 const auto *
Op = dyn_cast<SCEVComparePredicate>(
N);
15054 const auto *
Op = dyn_cast<SCEVWrapPredicate>(
N);
15066 const SCEV *OpStart =
Op->AR->getStart();
15071 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15075 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15128 if (Step->getValue()->getValue().isNonNegative())
15132 return ImpliedFlags;
15139 for (
const auto *
P : Preds)
15150 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(
N))
15152 return this->implies(I, SE);
15160 for (
const auto *Pred : Preds)
15165 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(
N)) {
15166 for (
const auto *Pred : Set->Preds)
15178 for (
auto *
P : Preds) {
15179 if (
N->implies(
P, SE))
15183 Preds = std::move(PrunedPreds);
15184 Preds.push_back(
N);
15191 Preds = std::make_unique<SCEVUnionPredicate>(Empty, SE);
15196 for (
const auto *
Op : Ops)
15200 if (!isa<SCEVConstant>(
Op))
15201 SCEVUsers[
Op].insert(
User);
15206 RewriteEntry &Entry = RewriteMap[Expr];
15209 if (Entry.second && Generation == Entry.first)
15210 return Entry.second;
15215 Expr = Entry.second;
15218 Entry = {Generation, NewSCEV};
15224 if (!BackedgeCount) {
15227 for (
const auto *
P : Preds)
15230 return BackedgeCount;
15234 if (!SymbolicMaxBackedgeCount) {
15236 SymbolicMaxBackedgeCount =
15238 for (
const auto *
P : Preds)
15241 return SymbolicMaxBackedgeCount;
15245 if (!SmallConstantMaxTripCount) {
15248 for (
const auto *
P : Preds)
15251 return *SmallConstantMaxTripCount;
15255 if (Preds->implies(&Pred, SE))
15260 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15261 updateGeneration();
15268void PredicatedScalarEvolution::updateGeneration() {
15270 if (++Generation == 0) {
15271 for (
auto &
II : RewriteMap) {
15272 const SCEV *Rewritten =
II.second.second;
15281 const auto *AR = cast<SCEVAddRecExpr>(Expr);
15289 auto II = FlagsMap.insert({V, Flags});
15297 const auto *AR = cast<SCEVAddRecExpr>(Expr);
15302 auto II = FlagsMap.find(V);
15304 if (
II != FlagsMap.end())
15318 for (
const auto *
P : NewPreds)
15321 RewriteMap[SE.
getSCEV(V)] = {Generation, New};
15327 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15330 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15331 for (
auto I :
Init.FlagsMap)
15332 FlagsMap.insert(
I);
15337 for (
auto *BB : L.getBlocks())
15338 for (
auto &
I : *BB) {
15343 auto II = RewriteMap.find(Expr);
15345 if (
II == RewriteMap.end())
15349 if (
II->second.second == Expr)
15363bool ScalarEvolution::matchURem(
const SCEV *Expr,
const SCEV *&LHS,
15364 const SCEV *&RHS) {
15371 if (
const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
15372 if (
const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
15373 LHS = Trunc->getOperand();
15385 const auto *
Add = dyn_cast<SCEVAddExpr>(Expr);
15386 if (
Add ==
nullptr ||
Add->getNumOperands() != 2)
15389 const SCEV *
A =
Add->getOperand(1);
15390 const auto *
Mul = dyn_cast<SCEVMulExpr>(
Add->getOperand(0));
15392 if (
Mul ==
nullptr)
15395 const auto MatchURemWithDivisor = [&](
const SCEV *
B) {
15406 if (
Mul->getNumOperands() == 3 && isa<SCEVConstant>(
Mul->getOperand(0)))
15407 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
15408 MatchURemWithDivisor(
Mul->getOperand(2));
15411 if (
Mul->getNumOperands() == 2)
15412 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
15413 MatchURemWithDivisor(
Mul->getOperand(0)) ||
15427 collectFromBlock(SE, Guards, Header, Pred, VisitedBlocks);
15431void ScalarEvolution::LoopGuards::collectFromPHI(
15439 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15440 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15446 collectFromBlock(SE,
G->second, Phi.getParent(),
InBlock, VisitedBlocks,
15448 auto &RewriteMap =
G->second.RewriteMap;
15449 if (RewriteMap.empty())
15451 auto S = RewriteMap.find(SE.
getSCEV(Phi.getIncomingValue(IncomingIdx)));
15452 if (S == RewriteMap.end())
15454 auto *SM = dyn_cast_if_present<SCEVMinMaxExpr>(S->second);
15457 if (
const SCEVConstant *C0 = dyn_cast<SCEVConstant>(SM->getOperand(0)))
15458 return {C0, SM->getSCEVType()};
15461 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15462 MinMaxPattern P2) -> MinMaxPattern {
15463 auto [C1,
T1] =
P1;
15464 auto [C2, T2] = P2;
15465 if (!C1 || !C2 || T1 != T2)
15469 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2, T1};
15471 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2, T1};
15473 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2, T1};
15475 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2, T1};
15480 auto P = GetMinMaxConst(0);
15481 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15484 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15490 Guards.RewriteMap.insert({
LHS,
RHS});
15494void ScalarEvolution::LoopGuards::collectFromBlock(
15509 if (isa<SCEVConstant>(LHS)) {
15518 &ExprsToRewrite]() {
15521 auto *C2 = dyn_cast<SCEVConstant>(RHS);
15532 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15534 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15535 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15543 if (MatchRangeCheckIdiom())
15549 auto IsMinMaxSCEVWithNonNegativeConstant =
15552 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
15553 if (
MinMax->getNumOperands() != 2)
15555 if (
auto *
C = dyn_cast<SCEVConstant>(
MinMax->getOperand(0))) {
15556 if (
C->getAPInt().isNegative())
15558 SCTy =
MinMax->getSCEVType();
15569 auto GetNonNegExprAndPosDivisor = [&](
const SCEV *Expr,
const SCEV *Divisor,
15571 auto *ConstExpr = dyn_cast<SCEVConstant>(Expr);
15572 auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
15573 if (!ConstExpr || !ConstDivisor)
15575 ExprVal = ConstExpr->getAPInt();
15576 DivisorVal = ConstDivisor->getAPInt();
15577 return ExprVal.isNonNegative() && !DivisorVal.isNonPositive();
15583 auto GetNextSCEVDividesByDivisor = [&](
const SCEV *Expr,
15584 const SCEV *Divisor) {
15587 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15592 return SE.
getConstant(ExprVal + DivisorVal - Rem);
15599 auto GetPreviousSCEVDividesByDivisor = [&](
const SCEV *Expr,
15600 const SCEV *Divisor) {
15603 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15613 std::function<
const SCEV *(
const SCEV *,
const SCEV *)>
15614 ApplyDivisibiltyOnMinMaxExpr = [&](
const SCEV *MinMaxExpr,
15615 const SCEV *Divisor) {
15616 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15618 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15622 isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
15624 "Expected non-negative operand!");
15625 auto *DivisibleExpr =
15626 IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
15627 : GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
15629 ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
15638 const SCEV *URemLHS =
nullptr;
15639 const SCEV *URemRHS =
nullptr;
15640 if (SE.matchURem(LHS, URemLHS, URemRHS)) {
15641 if (
const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
15642 auto I = RewriteMap.find(LHSUnknown);
15643 const SCEV *RewrittenLHS =
15644 I != RewriteMap.end() ?
I->second : LHSUnknown;
15645 RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
15646 const auto *Multiple =
15648 RewriteMap[LHSUnknown] = Multiple;
15660 if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
15669 auto AddRewrite = [&](
const SCEV *
From,
const SCEV *FromRewritten,
15671 if (
From == FromRewritten)
15673 RewriteMap[
From] = To;
15679 auto GetMaybeRewritten = [&](
const SCEV *S) {
15680 return RewriteMap.lookup_or(S, S);
15690 std::function<
bool(
const SCEV *,
const SCEV *&)> HasDivisibiltyInfo =
15691 [&](
const SCEV *Expr,
const SCEV *&DividesBy) {
15692 if (
auto *
Mul = dyn_cast<SCEVMulExpr>(Expr)) {
15693 if (
Mul->getNumOperands() != 2)
15695 auto *MulLHS =
Mul->getOperand(0);
15696 auto *MulRHS =
Mul->getOperand(1);
15697 if (isa<SCEVConstant>(MulLHS))
15699 if (
auto *Div = dyn_cast<SCEVUDivExpr>(MulLHS))
15700 if (Div->getOperand(1) == MulRHS) {
15701 DividesBy = MulRHS;
15705 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
15706 return HasDivisibiltyInfo(
MinMax->getOperand(0), DividesBy) ||
15707 HasDivisibiltyInfo(
MinMax->getOperand(1), DividesBy);
15712 std::function<
bool(
const SCEV *,
const SCEV *&)> IsKnownToDivideBy =
15713 [&](
const SCEV *Expr,
const SCEV *DividesBy) {
15716 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
15717 return IsKnownToDivideBy(
MinMax->getOperand(0), DividesBy) &&
15718 IsKnownToDivideBy(
MinMax->getOperand(1), DividesBy);
15722 const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
15723 const SCEV *DividesBy =
nullptr;
15724 if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
15727 IsKnownToDivideBy(RewrittenLHS, DividesBy) ? DividesBy :
nullptr;
15749 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15755 RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15759 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15763 RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15772 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15776 while (!Worklist.
empty()) {
15778 if (isa<SCEVConstant>(
From))
15782 const SCEV *FromRewritten = GetMaybeRewritten(
From);
15783 const SCEV *To =
nullptr;
15789 if (
auto *
UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
15790 EnqueueOperands(
UMax);
15795 if (
auto *
SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
15796 EnqueueOperands(
SMax);
15801 if (
auto *
UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
15802 EnqueueOperands(
UMin);
15807 if (
auto *
SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
15808 EnqueueOperands(
SMin);
15811 if (isa<SCEVConstant>(RHS))
15816 const SCEV *OneAlignedUp =
15817 DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
15818 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15826 AddRewrite(
From, FromRewritten, To);
15835 auto *AssumeI = cast<CallInst>(AssumeVH);
15843 SE.F.
getParent(), Intrinsic::experimental_guard);
15845 for (
const auto *GU : GuardDecl->users())
15846 if (
const auto *Guard = dyn_cast<IntrinsicInst>(GU))
15847 if (Guard->getFunction() ==
Block->getParent() &&
15856 unsigned NumCollectedConditions = 0;
15858 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15860 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15861 VisitedBlocks.
insert(Pair.second);
15863 dyn_cast<BranchInst>(Pair.first->getTerminator());
15869 NumCollectedConditions++;
15873 if (
Depth > 0 && NumCollectedConditions == 2)
15881 if (Pair.second->hasNPredecessorsOrMore(2) &&
15884 for (
auto &Phi : Pair.second->phis())
15885 collectFromPHI(SE, Guards, Phi, VisitedBlocks, IncomingGuards,
Depth);
15892 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15896 while (!Worklist.
empty()) {
15901 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond)) {
15903 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
15906 CollectCondition(
Predicate, LHS, RHS, Guards.RewriteMap);
15922 Guards.PreserveNUW =
true;
15923 Guards.PreserveNSW =
true;
15924 for (
const SCEV *Expr : ExprsToRewrite) {
15925 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15926 Guards.PreserveNUW &=
15928 Guards.PreserveNSW &=
15935 if (ExprsToRewrite.size() > 1) {
15936 for (
const SCEV *Expr : ExprsToRewrite) {
15937 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15938 Guards.RewriteMap.erase(Expr);
15939 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
15948 class SCEVLoopGuardRewriter
15958 if (Guards.PreserveNUW)
15960 if (Guards.PreserveNSW)
15967 return Map.lookup_or(Expr, Expr);
15971 if (
const SCEV *S = Map.lookup(Expr))
15979 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
15980 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
15983 if (
const SCEV *S = Map.lookup(NarrowExt))
15985 Bitwidth = Bitwidth / 2;
15993 if (
const SCEV *S = Map.lookup(Expr))
16000 if (
const SCEV *S = Map.lookup(Expr))
16006 if (
const SCEV *S = Map.lookup(Expr))
16013 bool Changed =
false;
16021 return !Changed ? Expr
16029 bool Changed =
false;
16037 return !Changed ? Expr
16044 if (RewriteMap.empty())
16047 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
block Block Frequency Analysis
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
mir Rename Register Operands
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< const SCEV * > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool CollectAddOperandsWithScales(SmallDenseMap< const SCEV *, APInt, 16 > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Virtual Register Rewriter
static const uint32_t IV[8]
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Instruction & front() const
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
This class represents a function call, abstracting a target machine's calling convention.
Value handle with callbacks on RAUW and destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
bool erase(const KeyT &Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
FunctionPass class - This class is used to implement most global optimizations.
const BasicBlock & getEntryBlock() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V, bool HasCoroSuspendInst=false) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed maximum selection.
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
SCEVTypes getEquivalentNonSequentialSCEVType() const
This class represents a sequential/in-order unsigned minimum selection.
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
void visitAll(const SCEV *Root)
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned maximum selection.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check state of analysis infor...
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, const SmallVectorImpl< const SCEV * > &IndexExprs)
Returns an expression for a GEP.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI APInt getConstantMultiple(const SCEV *S)
Returns the max constant multiple of S.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Does operation BinOp between LHS and RHS provably not have a signed/unsigned overflow (Signed)?...
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth=0)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
This class represents the LLVM 'select' instruction.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows using arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
class_match< const SCEV > m_SCEV()
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A is a subset of B.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
auto uninitialized_copy(R &&Src, IterTy Dst)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
constexpr bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken