83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
265#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
283 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
284 << *PtrToInt->
getType() <<
")";
290 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
297 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
304 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
333 const char *OpStr =
nullptr;
346 OpStr =
" umin_seq ";
370 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
377 OS <<
"***COULDNOTCOMPUTE***";
453 if (!
Mul)
return false;
457 if (!SC)
return false;
475 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
477 UniqueSCEVs.InsertNode(S, IP);
496 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
499 UniqueSCEVs.InsertNode(S, IP);
519 "Must be a non-bit-width-changing pointer-to-integer cast!");
531 "Cannot truncate non-integer value!");
538 "Cannot zero extend non-integer value!");
545 "Cannot sign extend non-integer value!");
550 SE->forgetMemoizedResults(
this);
553 SE->UniqueSCEVs.RemoveNode(
this);
559void SCEVUnknown::allUsesReplacedWith(
Value *New) {
561 SE->forgetMemoizedResults(
this);
564 SE->UniqueSCEVs.RemoveNode(
this);
586 if (LIsPointer != RIsPointer)
587 return (
int)LIsPointer - (int)RIsPointer;
592 return (
int)LID - (int)RID;
597 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
598 return (
int)LArgNo - (int)RArgNo;
604 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
607 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
608 auto LT = GV->getLinkage();
615 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
616 return LGV->getName().compare(RGV->getName());
627 if (LParent != RParent) {
630 if (LDepth != RDepth)
631 return (
int)LDepth - (int)RDepth;
635 unsigned LNumOps = LInst->getNumOperands(),
636 RNumOps = RInst->getNumOperands();
637 if (LNumOps != RNumOps)
638 return (
int)LNumOps - (int)RNumOps;
640 for (
unsigned Idx :
seq(LNumOps)) {
642 RInst->getOperand(Idx),
Depth + 1);
656static std::optional<int>
666 return (
int)LType - (int)RType;
691 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
692 if (LBitWidth != RBitWidth)
693 return (
int)LBitWidth - (int)RBitWidth;
694 return LA.
ult(
RA) ? -1 : 1;
700 return LTy->getBitWidth() - RTy->getBitWidth();
711 if (LLoop != RLoop) {
713 assert(LHead != RHead &&
"Two loops share the same header?");
717 "No dominance between recurrences used by one SCEV?");
740 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
741 if (LNumOps != RNumOps)
742 return (
int)LNumOps - (int)RNumOps;
744 for (
unsigned i = 0; i != LNumOps; ++i) {
769 if (
Ops.size() < 2)
return;
774 return Complexity && *Complexity < 0;
776 if (
Ops.size() == 2) {
780 if (IsLessComplex(
RHS,
LHS))
787 return IsLessComplex(
LHS,
RHS);
794 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
800 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
805 if (i == e-2)
return;
827template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
831 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
833 for (
unsigned Idx = 0; Idx <
Ops.size();) {
841 Ops.erase(
Ops.begin() + Idx);
848 assert(Folded &&
"Must have folded value");
852 if (Folded && IsAbsorber(Folded->
getAPInt()))
856 if (Folded && !IsIdentity(Folded->
getAPInt()))
857 Ops.insert(
Ops.begin(), Folded);
859 return Ops.size() == 1 ?
Ops[0] :
nullptr;
934 APInt OddFactorial(W, 1);
936 for (
unsigned i = 3; i <= K; ++i) {
939 OddFactorial *= (i >> TwoFactors);
943 unsigned CalculationBits = W +
T;
957 for (
unsigned i = 1; i != K; ++i) {
990 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1010 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1014 if (!
Op->getType()->isPointerTy())
1025 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1055 SCEV *S =
new (SCEVAllocator)
1057 UniqueSCEVs.InsertNode(S, IP);
1062 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1063 "non-SCEVUnknown's.");
1075 class SCEVPtrToIntSinkingRewriter
1083 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1084 return Rewriter.visit(Scev);
1093 return Base::visit(S);
1118 "Should only reach pointer-typed SCEVUnknown's.");
1124 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1126 "We must have succeeded in sinking the cast, "
1127 "and ending up with an integer-typed expression!");
1132 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1144 "This is not a truncating conversion!");
1146 "This is not a conversion to a SCEVable type!");
1147 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1155 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1177 UniqueSCEVs.InsertNode(S, IP);
1189 unsigned numTruncs = 0;
1190 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1198 if (numTruncs < 2) {
1208 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1215 for (
const SCEV *
Op : AddRec->operands())
1230 UniqueSCEVs.InsertNode(S, IP);
1270struct ExtendOpTraitsBase {
1271 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1276template <
typename ExtendOp>
struct ExtendOpTraits {
1292 static const GetExtendExprTy GetExtendExpr;
1294 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1295 ICmpInst::Predicate *Pred,
1296 ScalarEvolution *SE) {
1301const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1308 static const GetExtendExprTy GetExtendExpr;
1310 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1311 ICmpInst::Predicate *Pred,
1312 ScalarEvolution *SE) {
1317const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1329template <
typename ExtendOpTy>
1332 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1333 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1349 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1362 auto PreStartFlags =
1380 const SCEV *OperandExtendedStart =
1382 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1383 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1395 const SCEV *OverflowLimit =
1396 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1398 if (OverflowLimit &&
1406template <
typename ExtendOpTy>
1410 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1418 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1453template <
typename ExtendOpTy>
1454bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1457 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1467 APInt StartAI = StartC->
getAPInt();
1469 for (
unsigned Delta : {-2, -1, 1, 2}) {
1470 const SCEV *PreStart =
getConstant(StartAI - Delta);
1472 FoldingSetNodeID
ID;
1474 ID.AddPointer(PreStart);
1475 ID.AddPointer(Step);
1479 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1483 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1486 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1487 DeltaS, &Pred,
this);
1505 const unsigned BitWidth =
C.getBitWidth();
1523 const APInt &ConstantStart,
1542 auto &UserIDs = FoldCacheUser[
I.first->second];
1543 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1544 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1545 if (UserIDs[
I] ==
ID) {
1550 I.first->second = S;
1552 FoldCacheUser[S].push_back(
ID);
1558 "This is not an extending conversion!");
1560 "This is not a conversion to a SCEVable type!");
1561 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1565 if (
const SCEV *S = FoldCache.lookup(
ID))
1577 "This is not an extending conversion!");
1579 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1596 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1600 UniqueSCEVs.InsertNode(S, IP);
1609 const SCEV *
X = ST->getOperand();
1623 if (AR->isAffine()) {
1624 const SCEV *Start = AR->getStart();
1625 const SCEV *Step = AR->getStepRecurrence(*
this);
1627 const Loop *L = AR->getLoop();
1631 if (AR->hasNoUnsignedWrap()) {
1652 const SCEV *CastedMaxBECount =
1656 if (MaxBECount == RecastedMaxBECount) {
1666 const SCEV *WideMaxBECount =
1668 const SCEV *OperandExtendedAdd =
1674 if (ZAdd == OperandExtendedAdd) {
1685 OperandExtendedAdd =
1691 if (ZAdd == OperandExtendedAdd) {
1712 !AC.assumptions().empty()) {
1714 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1716 if (AR->hasNoUnsignedWrap()) {
1751 const APInt &
C = SC->getAPInt();
1755 const SCEV *SResidual =
1764 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1777 if (matchURem(
Op, LHS, RHS))
1789 if (SA->hasNoUnsignedWrap()) {
1793 for (
const auto *
Op : SA->operands())
1810 const SCEV *SResidual =
1822 if (SM->hasNoUnsignedWrap()) {
1826 for (
const auto *
Op : SM->operands())
1843 if (SM->getNumOperands() == 2)
1845 if (MulLHS->getAPInt().isPowerOf2())
1848 MulLHS->getAPInt().logBase2();
1863 for (
auto *Operand :
MinMax->operands())
1874 for (
auto *Operand :
MinMax->operands())
1881 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1884 UniqueSCEVs.InsertNode(S, IP);
1892 "This is not an extending conversion!");
1894 "This is not a conversion to a SCEVable type!");
1895 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1899 if (
const SCEV *S = FoldCache.lookup(
ID))
1911 "This is not an extending conversion!");
1913 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1935 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1940 UniqueSCEVs.InsertNode(S, IP);
1949 const SCEV *
X = ST->getOperand();
1960 if (SA->hasNoSignedWrap()) {
1964 for (
const auto *
Op : SA->operands())
1982 const SCEV *SResidual =
1996 if (AR->isAffine()) {
1997 const SCEV *Start = AR->getStart();
1998 const SCEV *Step = AR->getStepRecurrence(*
this);
2000 const Loop *L = AR->getLoop();
2004 if (AR->hasNoSignedWrap()) {
2026 const SCEV *CastedMaxBECount =
2030 if (MaxBECount == RecastedMaxBECount) {
2040 const SCEV *WideMaxBECount =
2042 const SCEV *OperandExtendedAdd =
2048 if (SAdd == OperandExtendedAdd) {
2059 OperandExtendedAdd =
2065 if (SAdd == OperandExtendedAdd) {
2085 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2087 if (AR->hasNoSignedWrap()) {
2102 const APInt &
C = SC->getAPInt();
2106 const SCEV *SResidual =
2115 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2134 for (
auto *Operand :
MinMax->operands())
2143 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2146 UniqueSCEVs.InsertNode(S, IP);
2172 "This is not an extending conversion!");
2174 "This is not a conversion to a SCEVable type!");
2179 if (SC->getAPInt().isNegative())
2184 const SCEV *NewOp =
T->getOperand();
2203 for (
const SCEV *
Op : AR->operands())
2242 APInt &AccumulatedConstant,
2245 bool Interesting =
false;
2252 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2254 AccumulatedConstant += Scale *
C->getAPInt();
2259 for (; i !=
Ops.size(); ++i) {
2269 Add->operands(), NewScale, SE);
2275 auto Pair = M.insert({
Key, NewScale});
2279 Pair.first->second += NewScale;
2287 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2288 M.insert({
Ops[i], Scale});
2292 Pair.first->second += Scale;
2311 case Instruction::Add:
2314 case Instruction::Sub:
2317 case Instruction::Mul:
2331 const SCEV *
A = (this->*Extension)(
2333 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2334 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2342 if (BinOp == Instruction::Mul)
2348 APInt C = RHSC->getAPInt();
2349 unsigned NumBits =
C.getBitWidth();
2350 bool IsSub = (BinOp == Instruction::Sub);
2351 bool IsNegativeConst = (
Signed &&
C.isNegative());
2353 bool OverflowDown = IsSub ^ IsNegativeConst;
2355 if (IsNegativeConst) {
2368 APInt Limit = Min + Magnitude;
2374 APInt Limit = Max - Magnitude;
2379std::optional<SCEV::NoWrapFlags>
2384 return std::nullopt;
2393 bool Deduced =
false;
2395 if (OBO->
getOpcode() != Instruction::Add &&
2398 return std::nullopt;
2407 false, LHS, RHS, CtxI)) {
2414 true, LHS, RHS, CtxI)) {
2421 return std::nullopt;
2431 using namespace std::placeholders;
2438 assert(CanAnalyze &&
"don't call from other places!");
2445 auto IsKnownNonNegative = [&](
const SCEV *S) {
2455 if (SignOrUnsignWrap != SignOrUnsignMask &&
2462 return Instruction::Add;
2464 return Instruction::Mul;
2475 Opcode,
C, OBO::NoSignedWrap);
2483 Opcode,
C, OBO::NoUnsignedWrap);
2493 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2500 if (UDiv->getOperand(1) ==
Ops[1])
2503 if (UDiv->getOperand(1) ==
Ops[0])
2519 "only nuw or nsw allowed");
2520 assert(!
Ops.empty() &&
"Cannot get empty add!");
2521 if (
Ops.size() == 1)
return Ops[0];
2524 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2526 "SCEVAddExpr operand types don't match!");
2528 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2529 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2534 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2535 [](
const APInt &
C) {
return C.isZero(); },
2536 [](
const APInt &
C) {
return false; });
2549 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2554 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2555 Add->setNoWrapFlags(ComputeFlags(
Ops));
2563 bool FoundMatch =
false;
2564 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2565 if (
Ops[i] ==
Ops[i+1]) {
2577 --i; e -=
Count - 1;
2587 auto FindTruncSrcType = [&]() ->
Type * {
2593 return T->getOperand()->getType();
2595 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2597 return T->getOperand()->getType();
2601 if (
auto *SrcType = FindTruncSrcType()) {
2608 if (
T->getOperand()->getType() != SrcType) {
2617 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2620 if (
T->getOperand()->getType() != SrcType) {
2648 if (
Ops.size() == 2) {
2658 auto C2 =
C->getAPInt();
2661 APInt ConstAdd = C1 + C2;
2662 auto AddFlags = AddExpr->getNoWrapFlags();
2702 if (
Ops.size() == 2) {
2704 if (
Mul &&
Mul->getNumOperands() == 2 &&
2705 Mul->getOperand(0)->isAllOnesValue()) {
2708 if (matchURem(
Mul->getOperand(1),
X,
Y) &&
X ==
Ops[1]) {
2719 if (Idx <
Ops.size()) {
2720 bool DeletedAdd =
false;
2731 Ops.erase(
Ops.begin()+Idx);
2734 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2757 struct APIntCompare {
2758 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2759 return LHS.ult(RHS);
2766 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2767 for (
const SCEV *NewOp : NewOps)
2768 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2771 if (AccumulatedConstant != 0)
2773 for (
auto &MulOp : MulOpLists) {
2774 if (MulOp.first == 1) {
2776 }
else if (MulOp.first != 0) {
2785 if (
Ops.size() == 1)
2796 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2797 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2800 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2801 if (MulOpSCEV ==
Ops[AddOp]) {
2803 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2804 if (
Mul->getNumOperands() != 2) {
2808 Mul->operands().take_front(MulOp));
2816 if (
Ops.size() == 2)
return OuterMul;
2818 Ops.erase(
Ops.begin()+AddOp);
2819 Ops.erase(
Ops.begin()+Idx-1);
2821 Ops.erase(
Ops.begin()+Idx);
2822 Ops.erase(
Ops.begin()+AddOp-1);
2824 Ops.push_back(OuterMul);
2829 for (
unsigned OtherMulIdx = Idx+1;
2836 OMulOp != e; ++OMulOp)
2837 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2839 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2840 if (
Mul->getNumOperands() != 2) {
2842 Mul->operands().take_front(MulOp));
2849 OtherMul->
operands().take_front(OMulOp));
2854 const SCEV *InnerMulSum =
2858 if (
Ops.size() == 2)
return OuterMul;
2859 Ops.erase(
Ops.begin()+Idx);
2860 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2861 Ops.push_back(OuterMul);
2881 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2884 Ops.erase(
Ops.begin()+i);
2889 if (!LIOps.
empty()) {
2914 auto *DefI = getDefiningScopeBound(LIOps);
2916 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2928 if (
Ops.size() == 1)
return NewRec;
2931 for (
unsigned i = 0;; ++i)
2932 if (
Ops[i] == AddRec) {
2942 for (
unsigned OtherIdx = Idx+1;
2950 "AddRecExprs are not sorted in reverse dominance order?");
2957 if (OtherAddRec->getLoop() == AddRecLoop) {
2958 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2960 if (i >= AddRecOps.
size()) {
2961 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2965 AddRecOps[i], OtherAddRec->getOperand(i)};
2968 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
2983 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2995 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
2999 S =
new (SCEVAllocator)
3001 UniqueSCEVs.InsertNode(S, IP);
3011 FoldingSetNodeID
ID;
3013 for (
const SCEV *
Op :
Ops)
3018 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3020 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3022 S =
new (SCEVAllocator)
3023 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3024 UniqueSCEVs.InsertNode(S, IP);
3025 LoopUsers[
L].push_back(S);
3035 FoldingSetNodeID
ID;
3037 for (
const SCEV *
Op :
Ops)
3041 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3043 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3045 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3047 UniqueSCEVs.InsertNode(S, IP);
3056 if (j > 1 && k / j != i) Overflow =
true;
3072 if (n == 0 || n == k)
return 1;
3073 if (k > n)
return 0;
3079 for (
uint64_t i = 1; i <= k; ++i) {
3080 r =
umul_ov(r, n-(i-1), Overflow);
3089 struct FindConstantInAddMulChain {
3090 bool FoundConstant =
false;
3092 bool follow(
const SCEV *S) {
3097 bool isDone()
const {
3098 return FoundConstant;
3102 FindConstantInAddMulChain
F;
3104 ST.visitAll(StartExpr);
3105 return F.FoundConstant;
3113 "only nuw or nsw allowed");
3114 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3115 if (
Ops.size() == 1)
return Ops[0];
3117 Type *ETy =
Ops[0]->getType();
3119 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3121 "SCEVMulExpr operand types don't match!");
3126 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3127 [](
const APInt &
C) {
return C.isOne(); },
3128 [](
const APInt &
C) {
return C.isZero(); });
3139 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3144 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3145 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3150 if (
Ops.size() == 2) {
3167 if (
Ops[0]->isAllOnesValue()) {
3172 bool AnyFolded =
false;
3173 for (
const SCEV *AddOp :
Add->operands()) {
3200 AddRec->getNoWrapFlags(FlagsMask));
3222 APInt C1V = LHSC->getAPInt();
3243 if (Idx <
Ops.size()) {
3244 bool DeletedMul =
false;
3250 Ops.erase(
Ops.begin()+Idx);
3274 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3277 Ops.erase(
Ops.begin()+i);
3282 if (!LIOps.
empty()) {
3295 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3311 if (
Ops.size() == 1)
return NewRec;
3314 for (
unsigned i = 0;; ++i)
3315 if (
Ops[i] == AddRec) {
3336 bool OpsModified =
false;
3337 for (
unsigned OtherIdx = Idx+1;
3351 bool Overflow =
false;
3358 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3362 z < ze && !Overflow; ++z) {
3365 if (LargerThan64Bits)
3366 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3368 Coeff = Coeff1*Coeff2;
3383 if (
Ops.size() == 2)
return NewAddRec;
3384 Ops[Idx] = NewAddRec;
3385 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3401 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3409 "SCEVURemExpr operand types don't match!");
3414 if (RHSC->getValue()->isOne())
3415 return getZero(LHS->getType());
3418 if (RHSC->getAPInt().isPowerOf2()) {
3419 Type *FullTy = LHS->getType();
3436 assert(!LHS->getType()->isPointerTy() &&
3437 "SCEVUDivExpr operand can't be pointer!");
3438 assert(LHS->getType() == RHS->getType() &&
3439 "SCEVUDivExpr operand types don't match!");
3446 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3454 if (RHSC->getValue()->isOne())
3459 if (!RHSC->getValue()->isZero()) {
3463 Type *Ty = LHS->getType();
3464 unsigned LZ = RHSC->getAPInt().countl_zero();
3468 if (!RHSC->getAPInt().isPowerOf2())
3476 const APInt &StepInt = Step->getAPInt();
3477 const APInt &DivInt = RHSC->getAPInt();
3478 if (!StepInt.
urem(DivInt) &&
3484 for (
const SCEV *
Op : AR->operands())
3492 if (StartC && !DivInt.
urem(StepInt) &&
3498 const APInt &StartRem = StartInt.
urem(StepInt);
3499 if (StartRem != 0) {
3500 const SCEV *NewLHS =
3503 if (LHS != NewLHS) {
3513 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3522 for (
const SCEV *
Op : M->operands())
3526 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3527 const SCEV *
Op = M->getOperand(i);
3539 if (
auto *DivisorConstant =
3541 bool Overflow =
false;
3543 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3554 for (
const SCEV *
Op :
A->operands())
3558 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3565 if (
Operands.size() ==
A->getNumOperands())
3572 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3578 AE && AE->getNumOperands() == 2) {
3580 const APInt &NegC = VC->getAPInt();
3583 if (MME && MME->getNumOperands() == 2 &&
3586 MME->getOperand(1) == RHS)
3587 return getZero(LHS->getType());
3595 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3598 UniqueSCEVs.InsertNode(S, IP);
3628 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3635 if (LHSCst == RHSCst) {
3643 APInt Factor =
gcd(LHSCst, RHSCst);
3661 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3662 if (
Mul->getOperand(i) == RHS) {
3681 if (StepChrec->getLoop() == L) {
3700 "SCEVAddRecExpr operand types don't match!");
3701 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3705 "SCEVAddRecExpr operand is not available at loop entry!");
3723 const Loop *NestedLoop = NestedAR->getLoop();
3724 if (L->contains(NestedLoop)
3727 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3729 Operands[0] = NestedAR->getStart();
3733 bool AllInvariant =
all_of(
3745 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3756 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3766 return getOrCreateAddRecExpr(
Operands, L, Flags);
3784 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3795 bool FirstIter =
true;
3797 for (
const SCEV *IndexExpr : IndexExprs) {
3804 Offsets.push_back(FieldOffset);
3807 CurTy = STy->getTypeAtIndex(Index);
3812 "The first index of a GEP indexes a pointer");
3813 CurTy =
GEP->getSourceElementType();
3824 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3825 Offsets.push_back(LocalOffset);
3830 if (Offsets.empty())
3843 "GEP should not change type mid-flight.");
3847SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3850 ID.AddInteger(SCEVType);
3854 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3864 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3865 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3866 if (
Ops.size() == 1)
return Ops[0];
3869 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3871 "Operand types don't match!");
3874 "min/max should be consistently pointerish");
3900 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3902 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3907 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3909 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3915 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
3921 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
3926 if (Idx <
Ops.size()) {
3927 bool DeletedAny =
false;
3928 while (
Ops[Idx]->getSCEVType() == Kind) {
3930 Ops.erase(
Ops.begin()+Idx);
3948 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
3949 if (
Ops[i] ==
Ops[i + 1] ||
3950 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
3953 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
3956 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
3959 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
3965 if (
Ops.size() == 1)
return Ops[0];
3967 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
3972 ID.AddInteger(Kind);
3976 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3978 return ExistingSCEV;
3979 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3981 SCEV *S =
new (SCEVAllocator)
3984 UniqueSCEVs.InsertNode(S, IP);
3991class SCEVSequentialMinMaxDeduplicatingVisitor final
3992 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3993 std::optional<const SCEV *>> {
3994 using RetVal = std::optional<const SCEV *>;
4002 bool canRecurseInto(
SCEVTypes Kind)
const {
4005 return RootKind == Kind || NonSequentialRootKind == Kind;
4008 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4010 "Only for min/max expressions.");
4013 if (!canRecurseInto(Kind))
4023 return std::nullopt;
4030 RetVal
visit(
const SCEV *S) {
4032 if (!SeenOps.
insert(S).second)
4033 return std::nullopt;
4034 return Base::visit(S);
4038 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4040 : SE(SE), RootKind(RootKind),
4041 NonSequentialRootKind(
4042 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4046 SmallVectorImpl<const SCEV *> &NewOps) {
4051 for (
const SCEV *
Op : OrigOps) {
4056 Ops.emplace_back(*NewOp);
4060 NewOps = std::move(
Ops);
4064 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4066 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4068 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4070 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4072 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4074 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4076 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4078 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4080 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4082 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4084 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4085 return visitAnyMinMaxExpr(Expr);
4088 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4089 return visitAnyMinMaxExpr(Expr);
4092 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4093 return visitAnyMinMaxExpr(Expr);
4096 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4097 return visitAnyMinMaxExpr(Expr);
4100 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4101 return visitAnyMinMaxExpr(Expr);
4104 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4106 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4148struct SCEVPoisonCollector {
4149 bool LookThroughMaybePoisonBlocking;
4150 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4151 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4152 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4154 bool follow(
const SCEV *S) {
4155 if (!LookThroughMaybePoisonBlocking &&
4165 bool isDone()
const {
return false; }
4175 SCEVPoisonCollector PC1(
true);
4180 if (PC1.MaybePoison.empty())
4186 SCEVPoisonCollector PC2(
false);
4196 SCEVPoisonCollector PC(
false);
4219 while (!Worklist.
empty()) {
4221 if (!Visited.
insert(V).second)
4225 if (Visited.
size() > 16)
4230 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4241 if (PDI->isDisjoint())
4248 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4255 if (
I->hasPoisonGeneratingAnnotations())
4266 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4267 "Not a SCEVSequentialMinMaxExpr!");
4268 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4269 if (
Ops.size() == 1)
4273 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4275 "Operand types don't match!");
4278 "min/max should be consistently pointerish");
4286 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4293 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4303 bool DeletedAny =
false;
4304 while (Idx <
Ops.size()) {
4305 if (
Ops[Idx]->getSCEVType() != Kind) {
4310 Ops.erase(
Ops.begin() + Idx);
4311 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4312 SMME->operands().end());
4320 const SCEV *SaturationPoint;
4331 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4332 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4344 Ops.erase(
Ops.begin() + i);
4349 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4350 Ops.erase(
Ops.begin() + i);
4358 ID.AddInteger(Kind);
4362 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4364 return ExistingSCEV;
4366 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
4368 SCEV *S =
new (SCEVAllocator)
4371 UniqueSCEVs.InsertNode(S, IP);
4419 if (
Size.isScalable())
4440 "Cannot get offset for structure containing scalable vector types");
4454 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4456 "Stale SCEVUnknown in uniquing map!");
4462 UniqueSCEVs.InsertNode(S, IP);
4476 return Ty->isIntOrPtrTy();
4483 if (Ty->isPointerTy())
4494 if (Ty->isIntegerTy())
4498 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4510 bool PreciseA, PreciseB;
4511 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4512 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4513 if (!PreciseA || !PreciseB)
4516 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4517 DT.dominates(ScopeB, ScopeA);
4521 return CouldNotCompute.get();
4524bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4527 return SU && SU->getValue() ==
nullptr;
4530 return !ContainsNulls;
4535 if (
I != HasRecMap.end())
4540 HasRecMap.insert({S, FoundAddRec});
4548 if (
SI == ExprValueMap.
end())
4550 return SI->second.getArrayRef();
4556void ScalarEvolution::eraseValueFromMap(
Value *V) {
4558 if (
I != ValueExprMap.end()) {
4559 auto EVIt = ExprValueMap.find(
I->second);
4560 bool Removed = EVIt->second.remove(V);
4562 assert(Removed &&
"Value not in ExprValueMap?");
4563 ValueExprMap.erase(
I);
4567void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4571 auto It = ValueExprMap.find_as(V);
4572 if (It == ValueExprMap.end()) {
4574 ExprValueMap[S].insert(V);
4585 return createSCEVIter(V);
4592 if (
I != ValueExprMap.end()) {
4593 const SCEV *S =
I->second;
4594 assert(checkValidity(S) &&
4595 "existing SCEV has not been properly invalidated");
4608 Type *Ty = V->getType();
4616 if (!
Add ||
Add->getNumOperands() != 2 ||
4617 !
Add->getOperand(0)->isAllOnesValue())
4630 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4643 return (
const SCEV *)
nullptr;
4649 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4653 Type *Ty = V->getType();
4659 assert(
P->getType()->isPointerTy());
4672 const SCEV **PtrOp =
nullptr;
4673 for (
const SCEV *&AddOp :
Ops) {
4674 if (AddOp->getType()->isPointerTy()) {
4675 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4693 return getZero(LHS->getType());
4698 if (RHS->getType()->isPointerTy()) {
4699 if (!LHS->getType()->isPointerTy() ||
4709 const bool RHSIsNotMinSigned =
4740 Type *SrcTy = V->getType();
4741 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4742 "Cannot truncate or zero extend with non-integer arguments!");
4752 Type *SrcTy = V->getType();
4753 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4754 "Cannot truncate or zero extend with non-integer arguments!");
4764 Type *SrcTy = V->getType();
4765 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4766 "Cannot noop or zero extend with non-integer arguments!");
4768 "getNoopOrZeroExtend cannot truncate!");
4776 Type *SrcTy = V->getType();
4777 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4778 "Cannot noop or sign extend with non-integer arguments!");
4780 "getNoopOrSignExtend cannot truncate!");
4788 Type *SrcTy = V->getType();
4789 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4790 "Cannot noop or any extend with non-integer arguments!");
4792 "getNoopOrAnyExtend cannot truncate!");
4800 Type *SrcTy = V->getType();
4801 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4802 "Cannot truncate or noop with non-integer arguments!");
4804 "getTruncateOrNoop cannot extend!");
4812 const SCEV *PromotedLHS = LHS;
4813 const SCEV *PromotedRHS = RHS;
4833 assert(!
Ops.empty() &&
"At least one operand must be!");
4835 if (
Ops.size() == 1)
4839 Type *MaxType =
nullptr;
4840 for (
const auto *S :
Ops)
4845 assert(MaxType &&
"Failed to find maximum type!");
4849 for (
const auto *S :
Ops)
4858 if (!V->getType()->isPointerTy())
4863 V = AddRec->getStart();
4865 const SCEV *PtrOp =
nullptr;
4866 for (
const SCEV *AddOp :
Add->operands()) {
4867 if (AddOp->getType()->isPointerTy()) {
4868 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4872 assert(PtrOp &&
"Must have pointer op");
4884 for (
User *U :
I->users()) {
4886 if (Visited.
insert(UserInsn).second)
4900 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
4901 bool IgnoreOtherLoops =
true) {
4904 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4906 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4911 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4913 SeenLoopVariantSCEVUnknown =
true;
4917 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4921 SeenOtherLoops =
true;
4925 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4927 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4930 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
4931 : SCEVRewriteVisitor(SE),
L(
L) {}
4934 bool SeenLoopVariantSCEVUnknown =
false;
4935 bool SeenOtherLoops =
false;
4944 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
4945 SCEVPostIncRewriter
Rewriter(L, SE);
4947 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4952 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4954 SeenLoopVariantSCEVUnknown =
true;
4958 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4962 SeenOtherLoops =
true;
4966 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4968 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4971 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
4972 : SCEVRewriteVisitor(SE),
L(
L) {}
4975 bool SeenLoopVariantSCEVUnknown =
false;
4976 bool SeenOtherLoops =
false;
4982class SCEVBackedgeConditionFolder
4985 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4986 ScalarEvolution &SE) {
4987 bool IsPosBECond =
false;
4988 Value *BECond =
nullptr;
4989 if (BasicBlock *Latch =
L->getLoopLatch()) {
4993 "Both outgoing branches should not target same header!");
5000 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5004 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5005 const SCEV *
Result = Expr;
5010 switch (
I->getOpcode()) {
5011 case Instruction::Select: {
5013 std::optional<const SCEV *> Res =
5014 compareWithBackedgeCondition(
SI->getCondition());
5022 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5033 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5034 bool IsPosBECond, ScalarEvolution &SE)
5035 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5036 IsPositiveBECond(IsPosBECond) {}
5038 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5042 Value *BackedgeCond =
nullptr;
5044 bool IsPositiveBECond;
5047std::optional<const SCEV *>
5048SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5053 if (BackedgeCond == IC)
5056 return std::nullopt;
5061 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5062 ScalarEvolution &SE) {
5068 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5075 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5082 bool isValid() {
return Valid; }
5085 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5086 : SCEVRewriteVisitor(SE),
L(
L) {}
5095ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5099 using OBO = OverflowingBinaryOperator;
5107 const APInt &BECountAP = BECountMax->getAPInt();
5108 unsigned NoOverflowBitWidth =
5120 Instruction::Add, IncRange, OBO::NoSignedWrap);
5121 if (NSWRegion.contains(AddRecRange))
5130 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5131 if (NUWRegion.contains(AddRecRange))
5139ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5149 if (!SignedWrapViaInductionTried.insert(AR).second)
5174 AC.assumptions().empty())
5182 const SCEV *OverflowLimit =
5184 if (OverflowLimit &&
5192ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5202 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5228 AC.assumptions().empty())
5261 Operator *
Op =
nullptr;
5263 explicit BinaryOp(Operator *
Op)
5267 IsNSW = OBO->hasNoSignedWrap();
5268 IsNUW = OBO->hasNoUnsignedWrap();
5272 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5274 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5286 return std::nullopt;
5292 switch (
Op->getOpcode()) {
5293 case Instruction::Add:
5294 case Instruction::Sub:
5295 case Instruction::Mul:
5296 case Instruction::UDiv:
5297 case Instruction::URem:
5298 case Instruction::And:
5299 case Instruction::AShr:
5300 case Instruction::Shl:
5301 return BinaryOp(
Op);
5303 case Instruction::Or: {
5306 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5308 return BinaryOp(
Op);
5311 case Instruction::Xor:
5315 if (RHSC->getValue().isSignMask())
5316 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5318 if (V->getType()->isIntegerTy(1))
5319 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5320 return BinaryOp(
Op);
5322 case Instruction::LShr:
5331 if (SA->getValue().ult(
BitWidth)) {
5333 ConstantInt::get(SA->getContext(),
5335 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5338 return BinaryOp(
Op);
5340 case Instruction::ExtractValue: {
5342 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5350 bool Signed = WO->isSigned();
5353 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5358 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5369 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5370 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5372 return std::nullopt;
5398 if (
Op == SymbolicPHI)
5403 if (SourceBits != NewBits)
5416 if (
X != SymbolicPHI)
5418 Signed = SExt !=
nullptr;
5426 if (!L || L->getHeader() != PN->
getParent())
5484std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5485ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5493 assert(L &&
"Expecting an integer loop header phi");
5498 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5499 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5500 Value *
V = PN->getIncomingValue(i);
5501 if (
L->contains(PN->getIncomingBlock(i))) {
5504 }
else if (BEValueV != V) {
5508 }
else if (!StartValueV) {
5510 }
else if (StartValueV != V) {
5511 StartValueV =
nullptr;
5515 if (!BEValueV || !StartValueV)
5516 return std::nullopt;
5518 const SCEV *BEValue =
getSCEV(BEValueV);
5525 return std::nullopt;
5529 unsigned FoundIndex =
Add->getNumOperands();
5530 Type *TruncTy =
nullptr;
5532 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5535 if (FoundIndex == e) {
5540 if (FoundIndex ==
Add->getNumOperands())
5541 return std::nullopt;
5545 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5546 if (i != FoundIndex)
5547 Ops.push_back(
Add->getOperand(i));
5553 return std::nullopt;
5606 const SCEV *StartVal =
getSCEV(StartValueV);
5607 const SCEV *PHISCEV =
5634 auto getExtendedExpr = [&](
const SCEV *Expr,
5635 bool CreateSignExtend) ->
const SCEV * {
5638 const SCEV *ExtendedExpr =
5641 return ExtendedExpr;
5649 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5650 const SCEV *ExtendedExpr) ->
bool {
5651 return Expr != ExtendedExpr &&
5655 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5656 if (PredIsKnownFalse(StartVal, StartExtended)) {
5658 return std::nullopt;
5663 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5664 if (PredIsKnownFalse(Accum, AccumExtended)) {
5666 return std::nullopt;
5669 auto AppendPredicate = [&](
const SCEV *Expr,
5670 const SCEV *ExtendedExpr) ->
void {
5671 if (Expr != ExtendedExpr &&
5679 AppendPredicate(StartVal, StartExtended);
5680 AppendPredicate(Accum, AccumExtended);
5688 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5689 std::make_pair(NewAR, Predicates);
5691 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5695std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5700 return std::nullopt;
5703 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5704 if (
I != PredicatedSCEVRewrites.end()) {
5705 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5708 if (Rewrite.first == SymbolicPHI)
5709 return std::nullopt;
5713 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5717 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5718 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5723 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5724 return std::nullopt;
5741 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5742 if (Expr1 != Expr2 &&
5743 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5744 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5761const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5763 Value *StartValueV) {
5766 assert(BEValueV && StartValueV);
5772 if (BO->Opcode != Instruction::Add)
5775 const SCEV *Accum =
nullptr;
5776 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5778 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5792 insertValueToMap(PN, PHISCEV);
5797 proveNoWrapViaConstantRanges(AR)));
5805 "Accum is defined outside L, but is not invariant?");
5806 if (isAddRecNeverPoison(BEInst, L))
5813const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5814 const Loop *
L = LI.getLoopFor(PN->
getParent());
5821 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5827 }
else if (BEValueV != V) {
5831 }
else if (!StartValueV) {
5833 }
else if (StartValueV != V) {
5834 StartValueV =
nullptr;
5838 if (!BEValueV || !StartValueV)
5841 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5842 "PHI node already processed?");
5846 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5851 insertValueToMap(PN, SymbolicName);
5855 const SCEV *BEValue =
getSCEV(BEValueV);
5865 unsigned FoundIndex =
Add->getNumOperands();
5866 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5867 if (
Add->getOperand(i) == SymbolicName)
5868 if (FoundIndex == e) {
5873 if (FoundIndex !=
Add->getNumOperands()) {
5876 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5877 if (i != FoundIndex)
5878 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5890 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5897 if (
GEP->getOperand(0) == PN) {
5898 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
5916 const SCEV *StartVal =
getSCEV(StartValueV);
5917 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
5922 forgetMemoizedResults(SymbolicName);
5923 insertValueToMap(PN, PHISCEV);
5928 proveNoWrapViaConstantRanges(AR)));
5953 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5954 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5956 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
5957 const SCEV *StartVal =
getSCEV(StartValueV);
5958 if (Start == StartVal) {
5962 forgetMemoizedResults(SymbolicName);
5963 insertValueToMap(PN, Shifted);
5973 eraseValueFromMap(PN);
5993 Use &LeftUse =
Merge->getOperandUse(0);
5994 Use &RightUse =
Merge->getOperandUse(1);
6011const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6013 [&](
BasicBlock *BB) {
return DT.isReachableFromEntry(BB); };
6028 assert(IDom &&
"At least the entry block should dominate PN");
6037 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6053ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6054 BinaryOperator *CommonInst =
nullptr;
6065 CommonInst = IncomingInst;
6072 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6073 bool SCEVExprsIdentical =
6075 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6076 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6079const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6080 if (
const SCEV *S = createAddRecFromPHI(PN))
6090 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6093 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6102 struct FindClosure {
6103 const SCEV *OperandToFind;
6109 bool canRecurseInto(
SCEVTypes Kind)
const {
6112 return RootKind == Kind || NonSequentialRootKind == Kind ||
6117 : OperandToFind(OperandToFind), RootKind(RootKind),
6118 NonSequentialRootKind(
6122 bool follow(
const SCEV *S) {
6123 Found = S == OperandToFind;
6125 return !isDone() && canRecurseInto(S->
getSCEVType());
6128 bool isDone()
const {
return Found; }
6131 FindClosure FC(OperandToFind, RootKind);
6136std::optional<const SCEV *>
6137ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6147 switch (ICI->getPredicate()) {
6161 bool Signed = ICI->isSigned();
6162 const SCEV *LA =
getSCEV(TrueVal);
6170 if (LA == LS &&
RA == RS)
6172 if (LA == RS &&
RA == LS)
6175 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6176 if (
Op->getType()->isPointerTy()) {
6187 LS = CoerceOperand(LS);
6188 RS = CoerceOperand(RS);
6212 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6213 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6227 X = ZExt->getOperand();
6229 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6240 return std::nullopt;
6243static std::optional<const SCEV *>
6245 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6249 "Unexpected operands of a select.");
6261 return std::nullopt;
6276static std::optional<const SCEV *>
6280 return std::nullopt;
6283 const auto *SETrue = SE->
getSCEV(TrueVal);
6284 const auto *SEFalse = SE->
getSCEV(FalseVal);
6288const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6290 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6292 V->getType() ==
TrueVal->getType() &&
6293 "Types of select hands and of the result must match.");
6296 if (!
V->getType()->isIntegerTy(1))
6299 if (std::optional<const SCEV *> S =
6312 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6316 if (std::optional<const SCEV *> S =
6317 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6323 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6329 assert(
GEP->getSourceElementType()->isSized() &&
6330 "GEP source element type must be sized");
6333 for (
Value *Index :
GEP->indices())
6338APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S) {
6340 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6343 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6345 auto GetGCDMultiple = [
this](
const SCEVNAryExpr *
N) {
6348 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6366 return GetShiftedByZeros(TZ);
6376 return GetShiftedByZeros(TZ);
6380 if (
M->hasNoUnsignedWrap()) {
6383 for (
const SCEV *Operand :
M->operands().drop_front())
6391 for (
const SCEV *Operand :
M->operands())
6393 return GetShiftedByZeros(TZ);
6398 if (
N->hasNoUnsignedWrap())
6399 return GetGCDMultiple(
N);
6402 for (
const SCEV *Operand :
N->operands().drop_front())
6404 return GetShiftedByZeros(TZ);
6417 .countMinTrailingZeros();
6418 return GetShiftedByZeros(Known);
6427 auto I = ConstantMultipleCache.find(S);
6428 if (
I != ConstantMultipleCache.end())
6431 APInt Result = getConstantMultipleImpl(S);
6432 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6433 assert(InsertPair.second &&
"Should insert a new key");
6434 return InsertPair.first->second;
6450 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6453 if (std::optional<ConstantRange>
Range = CB->getRange())
6457 if (std::optional<ConstantRange>
Range =
A->getRange())
6460 return std::nullopt;
6467 UnsignedRanges.erase(AddRec);
6468 SignedRanges.erase(AddRec);
6469 ConstantMultipleCache.erase(AddRec);
6474getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6500 Value *Start, *Step;
6507 assert(L && L->getHeader() ==
P->getParent());
6520 case Instruction::AShr:
6521 case Instruction::LShr:
6522 case Instruction::Shl:
6537 KnownStep.getBitWidth() ==
BitWidth);
6540 auto MaxShiftAmt = KnownStep.getMaxValue();
6542 bool Overflow =
false;
6543 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6550 case Instruction::AShr: {
6558 if (KnownStart.isNonNegative())
6561 KnownStart.getMaxValue() + 1);
6562 if (KnownStart.isNegative())
6565 KnownEnd.getMaxValue() + 1);
6568 case Instruction::LShr: {
6577 KnownStart.getMaxValue() + 1);
6579 case Instruction::Shl: {
6583 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6584 return ConstantRange(KnownStart.getMinValue(),
6585 KnownEnd.getMaxValue() + 1);
6593ScalarEvolution::getRangeRefIter(
const SCEV *S,
6594 ScalarEvolution::RangeSignHint SignHint) {
6595 DenseMap<const SCEV *, ConstantRange> &Cache =
6596 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6599 SmallPtrSet<const SCEV *, 8> Seen;
6603 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6604 if (!Seen.
insert(Expr).second)
6637 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6638 const SCEV *
P = WorkList[
I];
6642 for (
const SCEV *
Op :
P->operands())
6648 if (!PendingPhiRangesIter.insert(
P).second)
6655 if (!WorkList.
empty()) {
6660 getRangeRef(
P, SignHint);
6664 PendingPhiRangesIter.erase(
P);
6668 return getRangeRef(S, SignHint, 0);
6675 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6676 DenseMap<const SCEV *, ConstantRange> &Cache =
6677 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6685 if (
I != Cache.
end())
6689 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6694 return getRangeRefIter(S, SignHint);
6697 ConstantRange ConservativeResult(
BitWidth,
true);
6698 using OBO = OverflowingBinaryOperator;
6702 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6706 ConservativeResult =
6713 ConservativeResult = ConstantRange(
6729 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6736 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6743 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6747 ConstantRange
X = getRangeRef(PtrToInt->
getOperand(), SignHint,
Depth + 1);
6748 return setRange(PtrToInt, SignHint,
X);
6752 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6753 unsigned WrapType = OBO::AnyWrap;
6754 if (
Add->hasNoSignedWrap())
6755 WrapType |= OBO::NoSignedWrap;
6756 if (
Add->hasNoUnsignedWrap())
6757 WrapType |= OBO::NoUnsignedWrap;
6759 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6761 return setRange(
Add, SignHint,
6762 ConservativeResult.intersectWith(
X, RangeType));
6766 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6768 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6769 return setRange(
Mul, SignHint,
6770 ConservativeResult.intersectWith(
X, RangeType));
6774 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6775 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6776 return setRange(UDiv, SignHint,
6777 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6785 if (!UnsignedMinValue.
isZero())
6786 ConservativeResult = ConservativeResult.intersectWith(
6787 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6796 bool AllNonNeg =
true;
6797 bool AllNonPos =
true;
6798 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6805 ConservativeResult = ConservativeResult.intersectWith(
6810 ConservativeResult = ConservativeResult.intersectWith(
6819 const SCEV *MaxBEScev =
6833 auto RangeFromAffine = getRangeForAffineAR(
6835 ConservativeResult =
6836 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6838 auto RangeFromFactoring = getRangeViaFactoring(
6840 ConservativeResult =
6841 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6847 const SCEV *SymbolicMaxBECount =
6852 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6853 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6854 ConservativeResult =
6855 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6860 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6870 ID = Intrinsic::umax;
6873 ID = Intrinsic::smax;
6877 ID = Intrinsic::umin;
6880 ID = Intrinsic::smin;
6887 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
6888 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6890 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6891 return setRange(S, SignHint,
6892 ConservativeResult.intersectWith(
X, RangeType));
6901 ConservativeResult =
6902 ConservativeResult.intersectWith(*MDRange, RangeType);
6907 auto CR = getRangeForUnknownRecurrence(U);
6908 ConservativeResult = ConservativeResult.intersectWith(CR);
6919 if (
U->getType()->isPointerTy()) {
6922 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
6923 int ptrIdxDiff = ptrSize -
BitWidth;
6924 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
6937 ConservativeResult = ConservativeResult.intersectWith(
6941 ConservativeResult = ConservativeResult.intersectWith(
6946 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
6949 bool CanBeNull, CanBeFreed;
6950 uint64_t DerefBytes =
6951 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
6961 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
6962 uint64_t Rem = MaxVal.
urem(Align);
6967 ConservativeResult = ConservativeResult.intersectWith(
6975 if (PendingPhiRanges.insert(Phi).second) {
6976 ConstantRange RangeFromOps(
BitWidth,
false);
6978 for (
const auto &
Op :
Phi->operands()) {
6980 RangeFromOps = RangeFromOps.unionWith(OpRange);
6982 if (RangeFromOps.isFullSet())
6985 ConservativeResult =
6986 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6987 bool Erased = PendingPhiRanges.erase(Phi);
6988 assert(Erased &&
"Failed to erase Phi properly?");
6995 if (
II->getIntrinsicID() == Intrinsic::vscale) {
6997 ConservativeResult = ConservativeResult.difference(Disallowed);
7000 return setRange(U, SignHint, std::move(ConservativeResult));
7006 return setRange(S, SignHint, std::move(ConservativeResult));
7015 const APInt &MaxBECount,
7022 if (Step == 0 || MaxBECount == 0)
7029 return ConstantRange::getFull(
BitWidth);
7045 return ConstantRange::getFull(
BitWidth);
7057 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7058 : (StartUpper + std::move(
Offset));
7063 if (StartRange.
contains(MovedBoundary))
7064 return ConstantRange::getFull(
BitWidth);
7067 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7069 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7078 const APInt &MaxBECount) {
7082 "mismatched bit widths");
7091 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7093 StartSRange, MaxBECount,
7105ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7107 ScalarEvolution::RangeSignHint SignHint) {
7108 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7110 "This only works for non-self-wrapping AddRecs!");
7111 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7115 return ConstantRange::getFull(
BitWidth);
7123 return ConstantRange::getFull(
BitWidth);
7127 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7129 MaxItersWithoutWrap))
7130 return ConstantRange::getFull(
BitWidth);
7151 ConstantRange StartRange = getRangeRef(Start, SignHint);
7152 ConstantRange EndRange = getRangeRef(End, SignHint);
7153 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7157 return RangeBetween;
7162 return ConstantRange::getFull(
BitWidth);
7165 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7166 return RangeBetween;
7168 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7169 return RangeBetween;
7170 return ConstantRange::getFull(
BitWidth);
7175 const APInt &MaxBECount) {
7182 "mismatched bit widths");
7184 struct SelectPattern {
7185 Value *Condition =
nullptr;
7189 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7191 std::optional<unsigned> CastOp;
7205 CastOp = SCast->getSCEVType();
7206 S = SCast->getOperand();
7209 using namespace llvm::PatternMatch;
7216 Condition =
nullptr;
7248 bool isRecognized() {
return Condition !=
nullptr; }
7251 SelectPattern StartPattern(*
this,
BitWidth, Start);
7252 if (!StartPattern.isRecognized())
7253 return ConstantRange::getFull(
BitWidth);
7255 SelectPattern StepPattern(*
this,
BitWidth, Step);
7256 if (!StepPattern.isRecognized())
7257 return ConstantRange::getFull(
BitWidth);
7259 if (StartPattern.Condition != StepPattern.Condition) {
7263 return ConstantRange::getFull(
BitWidth);
7274 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7275 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7276 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7277 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7279 ConstantRange TrueRange =
7280 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7281 ConstantRange FalseRange =
7282 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7304ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7318 SmallPtrSet<const SCEV *, 16> Visited;
7320 auto pushOp = [&](
const SCEV *S) {
7321 if (!Visited.
insert(S).second)
7324 if (Visited.
size() > 30) {
7331 for (
const auto *S :
Ops)
7335 while (!Worklist.
empty()) {
7337 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7338 if (!Bound || DT.dominates(Bound, DefI))
7345 return Bound ? Bound : &*F.getEntryBlock().begin();
7351 return getDefiningScopeBound(
Ops, Discard);
7354bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7356 if (
A->getParent() ==
B->getParent() &&
7361 auto *BLoop = LI.getLoopFor(
B->getParent());
7362 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7363 BLoop->getLoopPreheader() ==
A->getParent() &&
7365 A->getParent()->end()) &&
7372bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7373 SCEVPoisonCollector PC(
true);
7375 return PC.MaybePoison.empty();
7378bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7384 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7388bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7405 for (
const Use &
Op :
I->operands()) {
7411 auto *DefI = getDefiningScopeBound(SCEVOps);
7412 return isGuaranteedToTransferExecutionTo(DefI,
I);
7415bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7417 if (isSCEVExprNeverPoison(
I))
7428 auto *ExitingBB =
L->getExitingBlock();
7432 SmallPtrSet<const Value *, 16> KnownPoison;
7441 while (!Worklist.
empty()) {
7444 for (
const Use &U :
Poison->uses()) {
7447 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7451 if (KnownPoison.
insert(PoisonUser).second)
7459ScalarEvolution::LoopProperties
7460ScalarEvolution::getLoopProperties(
const Loop *L) {
7461 using LoopProperties = ScalarEvolution::LoopProperties;
7463 auto Itr = LoopPropertiesCache.find(L);
7464 if (Itr == LoopPropertiesCache.end()) {
7467 return !
SI->isSimple();
7477 return I->mayWriteToMemory();
7480 LoopProperties LP = {
true,
7483 for (
auto *BB :
L->getBlocks())
7484 for (
auto &
I : *BB) {
7486 LP.HasNoAbnormalExits =
false;
7487 if (HasSideEffects(&
I))
7488 LP.HasNoSideEffects =
false;
7489 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7493 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7494 assert(InsertPair.second &&
"We just checked!");
7495 Itr = InsertPair.first;
7508const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7514 Stack.emplace_back(V,
true);
7515 Stack.emplace_back(V,
false);
7516 while (!Stack.empty()) {
7517 auto E = Stack.pop_back_val();
7518 Value *CurV = E.getPointer();
7524 const SCEV *CreatedSCEV =
nullptr;
7527 CreatedSCEV = createSCEV(CurV);
7532 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7536 insertValueToMap(CurV, CreatedSCEV);
7540 Stack.emplace_back(CurV,
true);
7542 Stack.emplace_back(
Op,
false);
7559 if (!DT.isReachableFromEntry(
I->getParent()))
7572 switch (BO->Opcode) {
7573 case Instruction::Add:
7574 case Instruction::Mul: {
7581 Ops.push_back(BO->
Op);
7585 Ops.push_back(BO->RHS);
7589 (BO->Opcode == Instruction::Add &&
7590 (NewBO->Opcode != Instruction::Add &&
7591 NewBO->Opcode != Instruction::Sub)) ||
7592 (BO->Opcode == Instruction::Mul &&
7593 NewBO->Opcode != Instruction::Mul)) {
7594 Ops.push_back(BO->LHS);
7599 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7602 Ops.push_back(BO->LHS);
7610 case Instruction::Sub:
7611 case Instruction::UDiv:
7612 case Instruction::URem:
7614 case Instruction::AShr:
7615 case Instruction::Shl:
7616 case Instruction::Xor:
7620 case Instruction::And:
7621 case Instruction::Or:
7625 case Instruction::LShr:
7632 Ops.push_back(BO->LHS);
7633 Ops.push_back(BO->RHS);
7637 switch (
U->getOpcode()) {
7638 case Instruction::Trunc:
7639 case Instruction::ZExt:
7640 case Instruction::SExt:
7641 case Instruction::PtrToInt:
7642 Ops.push_back(
U->getOperand(0));
7645 case Instruction::BitCast:
7647 Ops.push_back(
U->getOperand(0));
7652 case Instruction::SDiv:
7653 case Instruction::SRem:
7654 Ops.push_back(
U->getOperand(0));
7655 Ops.push_back(
U->getOperand(1));
7658 case Instruction::GetElementPtr:
7660 "GEP source element type must be sized");
7664 case Instruction::IntToPtr:
7667 case Instruction::PHI:
7671 case Instruction::Select: {
7673 auto CanSimplifyToUnknown = [
this,
U]() {
7691 if (CanSimplifyToUnknown())
7698 case Instruction::Call:
7699 case Instruction::Invoke:
7706 switch (
II->getIntrinsicID()) {
7707 case Intrinsic::abs:
7708 Ops.push_back(
II->getArgOperand(0));
7710 case Intrinsic::umax:
7711 case Intrinsic::umin:
7712 case Intrinsic::smax:
7713 case Intrinsic::smin:
7714 case Intrinsic::usub_sat:
7715 case Intrinsic::uadd_sat:
7716 Ops.push_back(
II->getArgOperand(0));
7717 Ops.push_back(
II->getArgOperand(1));
7719 case Intrinsic::start_loop_iterations:
7720 case Intrinsic::annotation:
7721 case Intrinsic::ptr_annotation:
7722 Ops.push_back(
II->getArgOperand(0));
7734const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7743 if (!DT.isReachableFromEntry(
I->getParent()))
7758 switch (BO->Opcode) {
7759 case Instruction::Add: {
7785 if (BO->Opcode == Instruction::Sub)
7793 if (BO->Opcode == Instruction::Sub)
7800 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7801 NewBO->Opcode != Instruction::Sub)) {
7811 case Instruction::Mul: {
7832 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7841 case Instruction::UDiv:
7845 case Instruction::URem:
7849 case Instruction::Sub: {
7852 Flags = getNoWrapFlagsFromUB(BO->
Op);
7857 case Instruction::And:
7863 if (CI->isMinusOne())
7865 const APInt &
A = CI->getValue();
7871 unsigned LZ =
A.countl_zero();
7872 unsigned TZ =
A.countr_zero();
7877 APInt EffectiveMask =
7879 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7882 const SCEV *ShiftedLHS =
nullptr;
7886 unsigned MulZeros = OpC->getAPInt().countr_zero();
7887 unsigned GCD = std::min(MulZeros, TZ);
7892 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7914 case Instruction::Or:
7923 case Instruction::Xor:
7926 if (CI->isMinusOne())
7935 if (LBO->getOpcode() == Instruction::And &&
7936 LCI->getValue() == CI->getValue())
7937 if (
const SCEVZeroExtendExpr *Z =
7940 const SCEV *Z0 =
Z->getOperand();
7947 if (CI->getValue().isMask(Z0TySize))
7953 APInt Trunc = CI->getValue().trunc(Z0TySize);
7962 case Instruction::Shl:
7980 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
7988 ConstantInt *
X = ConstantInt::get(
7994 case Instruction::AShr:
8016 const SCEV *AddTruncateExpr =
nullptr;
8017 ConstantInt *ShlAmtCI =
nullptr;
8018 const SCEV *AddConstant =
nullptr;
8020 if (L &&
L->getOpcode() == Instruction::Add) {
8028 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8035 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8043 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8048 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8053 if (AddTruncateExpr && ShlAmtCI) {
8065 const APInt &ShlAmt = ShlAmtCI->
getValue();
8069 const SCEV *CompositeExpr =
8071 if (
L->getOpcode() != Instruction::Shl)
8072 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8081 switch (
U->getOpcode()) {
8082 case Instruction::Trunc:
8085 case Instruction::ZExt:
8088 case Instruction::SExt:
8098 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8099 Type *Ty =
U->getType();
8107 case Instruction::BitCast:
8113 case Instruction::PtrToInt: {
8116 Type *DstIntTy =
U->getType();
8124 case Instruction::IntToPtr:
8128 case Instruction::SDiv:
8135 case Instruction::SRem:
8142 case Instruction::GetElementPtr:
8145 case Instruction::PHI:
8148 case Instruction::Select:
8149 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8152 case Instruction::Call:
8153 case Instruction::Invoke:
8158 switch (
II->getIntrinsicID()) {
8159 case Intrinsic::abs:
8163 case Intrinsic::umax:
8167 case Intrinsic::umin:
8171 case Intrinsic::smax:
8175 case Intrinsic::smin:
8179 case Intrinsic::usub_sat: {
8180 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8181 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8185 case Intrinsic::uadd_sat: {
8186 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8187 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8191 case Intrinsic::start_loop_iterations:
8192 case Intrinsic::annotation:
8193 case Intrinsic::ptr_annotation:
8197 case Intrinsic::vscale:
8217 auto *ExitCountType = ExitCount->
getType();
8218 assert(ExitCountType->isIntegerTy());
8220 1 + ExitCountType->getScalarSizeInBits());
8233 auto CanAddOneWithoutOverflow = [&]() {
8235 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8246 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8276 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8277 assert(L->isLoopExiting(ExitingBlock) &&
8278 "Exiting block must actually branch out of the loop!");
8287 const auto *MaxExitCount =
8295 L->getExitingBlocks(ExitingBlocks);
8297 std::optional<unsigned> Res;
8298 for (
auto *ExitingBB : ExitingBlocks) {
8302 Res = std::gcd(*Res, Multiple);
8304 return Res.value_or(1);
8308 const SCEV *ExitCount) {
8338 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8339 assert(L->isLoopExiting(ExitingBlock) &&
8340 "Exiting block must actually branch out of the loop!");
8350 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8352 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8354 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8364 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8367 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8370 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8378 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8385 return getBackedgeTakenInfo(L).getExact(L,
this);
8387 return getBackedgeTakenInfo(L).getConstantMax(
this);
8389 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8396 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8401 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8405 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8415 for (
PHINode &PN : Header->phis())
8416 if (Visited.
insert(&PN).second)
8420ScalarEvolution::BackedgeTakenInfo &
8421ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8422 auto &BTI = getBackedgeTakenInfo(L);
8423 if (BTI.hasFullInfo())
8426 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8429 return Pair.first->second;
8431 BackedgeTakenInfo
Result =
8432 computeBackedgeTakenCount(L,
true);
8434 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8437ScalarEvolution::BackedgeTakenInfo &
8438ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8444 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8445 BackedgeTakenCounts.try_emplace(L);
8447 return Pair.first->second;
8452 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8459 if (
Result.hasAnyInfo()) {
8462 auto LoopUsersIt = LoopUsers.find(L);
8463 if (LoopUsersIt != LoopUsers.end())
8465 forgetMemoizedResults(ToForget);
8468 for (PHINode &PN :
L->getHeader()->phis())
8469 ConstantEvolutionLoopExitValue.erase(&PN);
8477 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8486 BackedgeTakenCounts.clear();
8487 PredicatedBackedgeTakenCounts.clear();
8488 BECountUsers.clear();
8489 LoopPropertiesCache.clear();
8490 ConstantEvolutionLoopExitValue.clear();
8491 ValueExprMap.clear();
8492 ValuesAtScopes.clear();
8493 ValuesAtScopesUsers.clear();
8494 LoopDispositions.clear();
8495 BlockDispositions.clear();
8496 UnsignedRanges.clear();
8497 SignedRanges.clear();
8498 ExprValueMap.clear();
8500 ConstantMultipleCache.clear();
8501 PredicatedSCEVRewrites.clear();
8503 FoldCacheUser.clear();
8505void ScalarEvolution::visitAndClearUsers(
8509 while (!Worklist.
empty()) {
8516 if (It != ValueExprMap.
end()) {
8517 eraseValueFromMap(It->first);
8520 ConstantEvolutionLoopExitValue.erase(PN);
8534 while (!LoopWorklist.
empty()) {
8538 forgetBackedgeTakenCounts(CurrL,
false);
8539 forgetBackedgeTakenCounts(CurrL,
true);
8542 for (
auto I = PredicatedSCEVRewrites.begin();
8543 I != PredicatedSCEVRewrites.end();) {
8544 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8545 if (Entry.second == CurrL)
8546 PredicatedSCEVRewrites.erase(
I++);
8551 auto LoopUsersItr = LoopUsers.find(CurrL);
8552 if (LoopUsersItr != LoopUsers.end())
8557 visitAndClearUsers(Worklist, Visited, ToForget);
8559 LoopPropertiesCache.erase(CurrL);
8562 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8564 forgetMemoizedResults(ToForget);
8581 visitAndClearUsers(Worklist, Visited, ToForget);
8583 forgetMemoizedResults(ToForget);
8595 struct InvalidationRootCollector {
8599 InvalidationRootCollector(
Loop *L) : L(L) {}
8601 bool follow(
const SCEV *S) {
8607 if (L->contains(AddRec->
getLoop()))
8612 bool isDone()
const {
return false; }
8615 InvalidationRootCollector
C(L);
8617 forgetMemoizedResults(
C.Roots);
8630 BlockDispositions.clear();
8631 LoopDispositions.clear();
8648 while (!Worklist.
empty()) {
8650 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8651 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8652 if (!LoopDispoRemoved && !BlockDispoRemoved)
8654 auto Users = SCEVUsers.find(Curr);
8655 if (
Users != SCEVUsers.end())
8668const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8672 if (!isComplete() || ExitNotTaken.
empty())
8683 for (
const auto &ENT : ExitNotTaken) {
8684 const SCEV *BECount = ENT.ExactNotTaken;
8687 "We should only have known counts for exiting blocks that dominate "
8690 Ops.push_back(BECount);
8695 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8696 "Predicate should be always true!");
8705const ScalarEvolution::ExitNotTakenInfo *
8706ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8707 const BasicBlock *ExitingBlock,
8708 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8709 for (
const auto &ENT : ExitNotTaken)
8710 if (ENT.ExitingBlock == ExitingBlock) {
8711 if (ENT.hasAlwaysTruePredicate())
8713 else if (Predicates) {
8723const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8725 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8726 if (!getConstantMax())
8729 for (
const auto &ENT : ExitNotTaken)
8730 if (!ENT.hasAlwaysTruePredicate()) {
8738 "No point in having a non-constant max backedge taken count!");
8739 return getConstantMax();
8742const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8744 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8752 for (
const auto &ENT : ExitNotTaken) {
8753 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8756 "We should only have known counts for exiting blocks that "
8762 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8763 "Predicate should be always true!");
8766 if (ExitCounts.
empty())
8775bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8777 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8778 return !ENT.hasAlwaysTruePredicate();
8780 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8796 this->ExactNotTaken = E = ConstantMaxNotTaken;
8797 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
8802 "Exact is not allowed to be less precise than Constant Max");
8805 "Exact is not allowed to be less precise than Symbolic Max");
8808 "Symbolic Max is not allowed to be less precise than Constant Max");
8811 "No point in having a non-constant max backedge taken count!");
8813 for (
const auto PredList : PredLists)
8814 for (
const auto *
P : PredList) {
8822 "Backedge count should be int");
8825 "Max backedge count should be int");
8838ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8840 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8841 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8842 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8844 ExitNotTaken.reserve(ExitCounts.
size());
8845 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8846 std::back_inserter(ExitNotTaken),
8847 [&](
const EdgeExitInfo &EEI) {
8848 BasicBlock *ExitBB = EEI.first;
8849 const ExitLimit &EL = EEI.second;
8850 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8851 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8856 "No point in having a non-constant max backedge taken count!");
8860ScalarEvolution::BackedgeTakenInfo
8861ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8862 bool AllowPredicates) {
8864 L->getExitingBlocks(ExitingBlocks);
8866 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8869 bool CouldComputeBECount =
true;
8871 const SCEV *MustExitMaxBECount =
nullptr;
8872 const SCEV *MayExitMaxBECount =
nullptr;
8873 bool MustExitMaxOrZero =
false;
8874 bool IsOnlyExit = ExitingBlocks.
size() == 1;
8886 if (ExitIfTrue == CI->
isZero())
8890 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
8892 assert((AllowPredicates || EL.Predicates.empty()) &&
8893 "Predicated exit limit when predicates are not allowed!");
8898 ++NumExitCountsComputed;
8902 CouldComputeBECount =
false;
8909 "Exact is known but symbolic isn't?");
8910 ++NumExitCountsNotComputed;
8925 DT.dominates(ExitBB, Latch)) {
8926 if (!MustExitMaxBECount) {
8927 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8928 MustExitMaxOrZero = EL.MaxOrZero;
8931 EL.ConstantMaxNotTaken);
8935 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8938 EL.ConstantMaxNotTaken);
8942 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8946 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8952 for (
const auto &Pair : ExitCounts) {
8954 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
8956 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8957 {
L, AllowPredicates});
8959 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8960 MaxBECount, MaxOrZero);
8963ScalarEvolution::ExitLimit
8964ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
8965 bool IsOnlyExit,
bool AllowPredicates) {
8966 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
8970 if (!Latch || !DT.dominates(ExitingBlock, Latch))
8978 "It should have one successor in loop and one exit block!");
8989 if (!
L->contains(SBB)) {
8994 assert(Exit &&
"Exiting block must have at least one exit");
8995 return computeExitLimitFromSingleExitSwitch(
8996 L, SI, Exit, IsOnlyExit);
9003 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9004 bool AllowPredicates) {
9005 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9006 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9007 ControlsOnlyExit, AllowPredicates);
9010std::optional<ScalarEvolution::ExitLimit>
9011ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9012 bool ExitIfTrue,
bool ControlsOnlyExit,
9013 bool AllowPredicates) {
9015 (void)this->ExitIfTrue;
9016 (void)this->AllowPredicates;
9018 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9019 this->AllowPredicates == AllowPredicates &&
9020 "Variance in assumed invariant key components!");
9021 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9022 if (Itr == TripCountMap.end())
9023 return std::nullopt;
9027void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9029 bool ControlsOnlyExit,
9030 bool AllowPredicates,
9032 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9033 this->AllowPredicates == AllowPredicates &&
9034 "Variance in assumed invariant key components!");
9036 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9037 assert(InsertResult.second &&
"Expected successful insertion!");
9042ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9043 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9044 bool ControlsOnlyExit,
bool AllowPredicates) {
9046 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9050 ExitLimit EL = computeExitLimitFromCondImpl(
9051 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9052 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9056ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9057 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9058 bool ControlsOnlyExit,
bool AllowPredicates) {
9060 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9061 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9062 return *LimitFromBinOp;
9068 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9069 if (EL.hasFullInfo() || !AllowPredicates)
9073 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9093 const WithOverflowInst *WO;
9108 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9109 ControlsOnlyExit, AllowPredicates);
9110 if (EL.hasAnyInfo())
9115 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9118std::optional<ScalarEvolution::ExitLimit>
9119ScalarEvolution::computeExitLimitFromCondFromBinOp(
9120 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9121 bool ControlsOnlyExit,
bool AllowPredicates) {
9130 return std::nullopt;
9135 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9136 ExitLimit EL0 = computeExitLimitFromCondCached(
9137 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9139 ExitLimit EL1 = computeExitLimitFromCondCached(
9140 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9144 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9146 return Op1 == NeutralElement ? EL0 : EL1;
9148 return Op0 == NeutralElement ? EL1 : EL0;
9153 if (EitherMayExit) {
9163 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9165 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9168 EL1.ConstantMaxNotTaken);
9170 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9172 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9175 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9179 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9180 BECount = EL0.ExactNotTaken;
9193 SymbolicMaxBECount =
9195 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9199ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9200 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9201 bool AllowPredicates) {
9213 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9215 if (EL.hasAnyInfo())
9218 auto *ExhaustiveCount =
9219 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9222 return ExhaustiveCount;
9224 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9227ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9228 const Loop *L, CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS,
9229 bool ControlsOnlyExit,
bool AllowPredicates) {
9254 ConstantRange CompRange =
9270 auto *InnerLHS =
LHS;
9272 InnerLHS = ZExt->getOperand();
9319 if (EL.hasAnyInfo())
9336 if (EL.hasAnyInfo())
return EL;
9368 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9370 if (EL.hasAnyInfo())
9386 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9388 if (EL.hasAnyInfo())
9399ScalarEvolution::ExitLimit
9400ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9402 BasicBlock *ExitingBlock,
9403 bool ControlsOnlyExit) {
9404 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9407 if (
Switch->getDefaultDest() == ExitingBlock)
9411 "Default case must not exit the loop!");
9417 if (EL.hasAnyInfo())
9429 "Evaluation of SCEV at constant didn't fold correctly?");
9433ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9443 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9449 auto MatchPositiveShift =
9452 using namespace PatternMatch;
9454 ConstantInt *ShiftAmt;
9456 OutOpCode = Instruction::LShr;
9458 OutOpCode = Instruction::AShr;
9460 OutOpCode = Instruction::Shl;
9475 auto MatchShiftRecurrence =
9477 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9492 if (MatchPositiveShift(
LHS, V, OpC)) {
9493 PostShiftOpCode = OpC;
9499 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9502 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9508 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9515 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9520 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9532 ConstantInt *StableValue =
nullptr;
9537 case Instruction::AShr: {
9545 StableValue = ConstantInt::get(Ty, 0);
9547 StableValue = ConstantInt::get(Ty, -1,
true);
9553 case Instruction::LShr:
9554 case Instruction::Shl:
9564 "Otherwise cannot be an operand to a branch instruction");
9566 if (
Result->isZeroValue()) {
9568 const SCEV *UpperBound =
9585 if (
const Function *
F = CI->getCalledFunction())
9594 if (!L->contains(
I))
return false;
9599 return L->getHeader() ==
I->getParent();
9675 if (!
I)
return nullptr;
9688 std::vector<Constant*>
Operands(
I->getNumOperands());
9690 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9699 if (!
C)
return nullptr;
9721 if (IncomingVal != CurrentVal) {
9724 IncomingVal = CurrentVal;
9736ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9739 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9748 DenseMap<Instruction *, Constant *> CurrentIterVals;
9750 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9756 for (PHINode &
PHI : Header->phis()) {
9758 CurrentIterVals[&
PHI] = StartCST;
9760 if (!CurrentIterVals.
count(PN))
9761 return RetVal =
nullptr;
9767 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9770 unsigned IterationNum = 0;
9772 for (; ; ++IterationNum) {
9773 if (IterationNum == NumIterations)
9774 return RetVal = CurrentIterVals[PN];
9778 DenseMap<Instruction *, Constant *> NextIterVals;
9783 NextIterVals[PN] = NextPHI;
9785 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9791 for (
const auto &
I : CurrentIterVals) {
9793 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9798 for (
const auto &
I : PHIsToCompute) {
9799 PHINode *
PHI =
I.first;
9802 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9805 if (NextPHI !=
I.second)
9806 StoppedEvolving =
false;
9811 if (StoppedEvolving)
9812 return RetVal = CurrentIterVals[PN];
9814 CurrentIterVals.swap(NextIterVals);
9818const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9828 DenseMap<Instruction *, Constant *> CurrentIterVals;
9830 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9833 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9835 for (PHINode &
PHI : Header->phis()) {
9837 CurrentIterVals[&
PHI] = StartCST;
9839 if (!CurrentIterVals.
count(PN))
9847 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9854 if (CondVal->getValue() == uint64_t(ExitWhen)) {
9855 ++NumBruteForceTripCountsComputed;
9860 DenseMap<Instruction *, Constant *> NextIterVals;
9866 for (
const auto &
I : CurrentIterVals) {
9868 if (!
PHI ||
PHI->getParent() != Header)
continue;
9871 for (PHINode *
PHI : PHIsToCompute) {
9873 if (NextPHI)
continue;
9875 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9878 CurrentIterVals.
swap(NextIterVals);
9889 for (
auto &LS : Values)
9891 return LS.second ? LS.second : V;
9896 const SCEV *
C = computeSCEVAtScope(V, L);
9897 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9898 if (LS.first == L) {
9901 ValuesAtScopesUsers[
C].push_back({L, V});
9912 switch (V->getSCEVType()) {
9945 assert(!
C->getType()->isPointerTy() &&
9946 "Can only have one pointer, and it must be last");
9973ScalarEvolution::getWithOperands(
const SCEV *S,
9974 SmallVectorImpl<const SCEV *> &NewOps) {
10008const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10009 switch (
V->getSCEVType()) {
10020 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10031 for (++i; i !=
e; ++i)
10075 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10077 if (OpAtScope !=
Ops[i]) {
10085 for (++i; i !=
e; ++i) {
10090 return getWithOperands(V, NewOps);
10105 const Loop *CurrLoop = this->LI[
I->getParent()];
10116 if (BackedgeTakenCount->
isZero()) {
10117 Value *InitValue =
nullptr;
10118 bool MultipleInitValues =
false;
10124 MultipleInitValues =
true;
10129 if (!MultipleInitValues && InitValue)
10138 unsigned InLoopPred =
10149 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10165 bool MadeImprovement =
false;
10180 MadeImprovement |= OrigV != OpV;
10185 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10190 if (!MadeImprovement)
10211const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10213 return stripInjectiveFunctions(ZExt->getOperand());
10215 return stripInjectiveFunctions(SExt->getOperand());
10234 assert(
A != 0 &&
"A must be non-zero.");
10270 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10290static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10296 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10297 << *AddRec <<
'\n');
10300 if (!LC || !MC || !
NC) {
10301 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10302 return std::nullopt;
10308 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10316 N =
N.sext(NewWidth);
10317 M = M.sext(NewWidth);
10318 L = L.sext(NewWidth);
10335 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10336 <<
", multiplied by " <<
T <<
'\n');
10345 std::optional<APInt>
Y) {
10347 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10350 return XW.
slt(YW) ? *
X : *
Y;
10353 return std::nullopt;
10354 return X ? *
X : *
Y;
10371 return std::nullopt;
10372 unsigned W =
X->getBitWidth();
10392static std::optional<APInt>
10398 return std::nullopt;
10401 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10402 std::optional<APInt>
X =
10405 return std::nullopt;
10410 return std::nullopt;
10425static std::optional<APInt>
10429 "Starting value of addrec should be 0");
10430 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10431 <<
Range <<
", addrec " << *AddRec <<
'\n');
10435 "Addrec's initial value should be in range");
10441 return std::nullopt;
10451 auto SolveForBoundary =
10452 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10455 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10456 << Bound <<
" (before multiplying by " << M <<
")\n");
10459 std::optional<APInt> SO;
10462 "signed overflow\n");
10466 "unsigned overflow\n");
10467 std::optional<APInt> UO =
10470 auto LeavesRange = [&] (
const APInt &
X) {
10487 return {std::nullopt,
false};
10492 if (LeavesRange(*Min))
10493 return { Min,
true };
10494 std::optional<APInt> Max = Min == SO ? UO : SO;
10495 if (LeavesRange(*Max))
10496 return { Max,
true };
10499 return {std::nullopt,
true};
10506 auto SL = SolveForBoundary(
Lower);
10507 auto SU = SolveForBoundary(
Upper);
10510 if (!SL.second || !SU.second)
10511 return std::nullopt;
10554ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10556 bool ControlsOnlyExit,
10557 bool AllowPredicates) {
10568 if (
C->getValue()->isZero())
return C;
10572 const SCEVAddRecExpr *AddRec =
10575 if (!AddRec && AllowPredicates)
10581 if (!AddRec || AddRec->
getLoop() != L)
10592 return ExitLimit(R, R, R,
false, Predicates);
10650 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10676 const SCEV *
Exact =
10684 const SCEV *SymbolicMax =
10686 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10695 AllowPredicates ? &Predicates :
nullptr, *
this);
10703 return ExitLimit(
E, M, S,
false, Predicates);
10706ScalarEvolution::ExitLimit
10707ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10715 if (!
C->getValue()->isZero())
10725std::pair<const BasicBlock *, const BasicBlock *>
10726ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10737 if (
const Loop *L = LI.getLoopFor(BB))
10738 return {
L->getLoopPredecessor(),
L->getHeader()};
10740 return {
nullptr, BB};
10749 if (
A ==
B)
return true;
10764 if (ComputesEqualValues(AI, BI))
10773 if (!
Add ||
Add->getNumOperands() != 2)
10776 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10777 LHS =
Add->getOperand(1);
10778 RHS = ME->getOperand(1);
10782 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10783 LHS =
Add->getOperand(0);
10784 RHS = ME->getOperand(1);
10795 auto TrivialCase = [&](
bool TriviallyTrue) {
10809 return TrivialCase(
false);
10810 return TrivialCase(
true);
10833 const APInt &
RA = RC->getAPInt();
10835 bool SimplifiedByConstantRange =
false;
10840 return TrivialCase(
true);
10842 return TrivialCase(
false);
10851 Changed = SimplifiedByConstantRange =
true;
10855 if (!SimplifiedByConstantRange) {
10872 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10878 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10884 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10890 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
10902 return TrivialCase(
true);
10904 return TrivialCase(
false);
11009 auto NonRecursive = [
this, OrNegative](
const SCEV *S) {
11011 return C->getAPInt().isPowerOf2() ||
11012 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11015 return isa<SCEVVScale>(S) && F.hasFnAttribute(Attribute::VScaleRange);
11018 if (NonRecursive(S))
11044 APInt C = Cst->getAPInt();
11045 return C.urem(M) == 0;
11053 const SCEV *SmodM =
11068 for (
auto *
A : Assumptions)
11069 if (
A->implies(
P, *
this))
11077std::pair<const SCEV *, const SCEV *>
11080 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11082 return { Start, Start };
11084 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11093 getUsedLoops(LHS, LoopsUsed);
11094 getUsedLoops(RHS, LoopsUsed);
11096 if (LoopsUsed.
empty())
11101 for (
const auto *L1 : LoopsUsed)
11102 for (
const auto *L2 : LoopsUsed)
11103 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11104 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11105 "Domination relationship is not a linear order");
11135 SplitRHS.second) &&
11147 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11151 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11161 return std::nullopt;
11176 if (KnownWithoutContext)
11177 return KnownWithoutContext;
11184 return std::nullopt;
11190 const Loop *L = LHS->getLoop();
11195std::optional<ScalarEvolution::MonotonicPredicateType>
11198 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11204 auto ResultSwapped =
11207 assert(*ResultSwapped != *Result &&
11208 "monotonicity should flip as we flip the predicate");
11215std::optional<ScalarEvolution::MonotonicPredicateType>
11216ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11230 return std::nullopt;
11234 "Should be greater or less!");
11238 if (!LHS->hasNoUnsignedWrap())
11239 return std::nullopt;
11243 "Relational predicate is either signed or unsigned!");
11244 if (!
LHS->hasNoSignedWrap())
11245 return std::nullopt;
11247 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11255 return std::nullopt;
11258std::optional<ScalarEvolution::LoopInvariantPredicate>
11265 return std::nullopt;
11272 if (!ArLHS || ArLHS->
getLoop() != L)
11273 return std::nullopt;
11277 return std::nullopt;
11303 return std::nullopt;
11340 return std::nullopt;
11343std::optional<ScalarEvolution::LoopInvariantPredicate>
11348 Pred, LHS, RHS, L, CtxI, MaxIter))
11356 for (
auto *
Op :
UMin->operands())
11358 Pred, LHS, RHS, L, CtxI,
Op))
11360 return std::nullopt;
11363std::optional<ScalarEvolution::LoopInvariantPredicate>
11378 return std::nullopt;
11385 if (!AR || AR->
getLoop() != L)
11386 return std::nullopt;
11390 return std::nullopt;
11396 if (Step != One && Step != MinusOne)
11397 return std::nullopt;
11403 return std::nullopt;
11409 return std::nullopt;
11417 if (Step == MinusOne)
11421 return std::nullopt;
11427bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11433 auto CheckRange = [&](
bool IsSigned) {
11436 return RangeLHS.
icmp(Pred, RangeRHS);
11445 if (CheckRange(
true) || CheckRange(
false))
11454bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11461 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11462 APInt &OutC1, APInt &OutC2,
11464 const SCEV *XNonConstOp, *XConstOp;
11465 const SCEV *YNonConstOp, *YConstOp;
11469 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11472 XFlagsPresent = ExpectedFlags;
11477 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11480 YFlagsPresent = ExpectedFlags;
11483 if (YNonConstOp != XNonConstOp)
11491 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11494 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11554bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11576bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11577 const SCEV *
LHS,
const SCEV *
RHS) {
11582 return any_of(*BB, [&](
const Instruction &
I) {
11583 using namespace llvm::PatternMatch;
11588 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11602 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11607 "This cannot be done on broken IR!");
11610 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11619 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11620 isImpliedCond(Pred, LHS, RHS,
11622 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11627 if (WalkingBEDominatingConds)
11633 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11634 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11641 const SCEV *LoopCounter =
11649 for (
auto &AssumeVH : AC.assumptions()) {
11656 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11660 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11663 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11664 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11665 assert(DTN &&
"should reach the loop header before reaching the root!");
11668 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11676 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11690 assert(DT.dominates(DominatingEdge, Latch) &&
"should be!");
11692 if (isImpliedCond(Pred, LHS, RHS, Condition,
11706 if (!DT.isReachableFromEntry(BB))
11710 "This cannot be done on broken IR!");
11718 const bool ProvingStrictComparison =
11720 bool ProvedNonStrictComparison =
false;
11721 bool ProvedNonEquality =
false;
11724 if (!ProvedNonStrictComparison)
11725 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11726 if (!ProvedNonEquality)
11728 if (ProvedNonStrictComparison && ProvedNonEquality)
11733 if (ProvingStrictComparison) {
11735 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11737 if (SplitAndProve(ProofFn))
11742 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11744 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
11746 if (ProvingStrictComparison) {
11748 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
11750 if (SplitAndProve(ProofFn))
11759 const Loop *ContainingLoop = LI.getLoopFor(BB);
11761 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11765 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11766 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11778 for (
auto &AssumeVH : AC.assumptions()) {
11782 if (!DT.dominates(CI, BB))
11785 if (ProveViaCond(CI->getArgOperand(0),
false))
11791 F.getParent(), Intrinsic::experimental_guard);
11793 for (
const auto *GU : GuardDecl->users())
11795 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
11796 if (ProveViaCond(Guard->getArgOperand(0),
false))
11811 "LHS is not available at Loop Entry");
11813 "RHS is not available at Loop Entry");
11815 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11826 if (FoundCondValue ==
11830 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11834 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11837 const Value *Op0, *Op1;
11840 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
11844 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
11845 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
11849 if (!ICI)
return false;
11853 CmpPredicate FoundPred;
11862 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11865bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
11866 const SCEV *
RHS, CmpPredicate FoundPred,
11867 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11868 const Instruction *CtxI) {
11878 auto *WideType = FoundLHS->
getType();
11890 TruncFoundLHS, TruncFoundRHS, CtxI))
11916 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
11920bool ScalarEvolution::isImpliedCondBalancedTypes(
11921 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
11922 const SCEV *FoundLHS,
const SCEV *FoundRHS,
const Instruction *CtxI) {
11925 "Types should be balanced!");
11932 if (FoundLHS == FoundRHS)
11936 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
11948 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
11965 LHS, FoundLHS, FoundRHS, CtxI);
11967 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
11989 assert(P1 != P2 &&
"Handled earlier!");
11993 if (IsSignFlippedPredicate(Pred, FoundPred)) {
11998 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12001 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12002 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12003 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12008 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12019 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12020 CanonicalRHS, CanonicalFoundLHS,
12021 CanonicalFoundRHS);
12026 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12027 CanonicalRHS, CanonicalFoundLHS,
12028 CanonicalFoundRHS);
12035 const SCEVConstant *
C =
nullptr;
12036 const SCEV *
V =
nullptr;
12054 if (Min ==
C->getAPInt()) {
12059 APInt SharperMin = Min + 1;
12062 case ICmpInst::ICMP_SGE:
12063 case ICmpInst::ICMP_UGE:
12066 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12071 case ICmpInst::ICMP_SGT:
12072 case ICmpInst::ICMP_UGT:
12082 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12087 case ICmpInst::ICMP_SLE:
12088 case ICmpInst::ICMP_ULE:
12089 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12090 LHS, V, getConstant(SharperMin), CtxI))
12094 case ICmpInst::ICMP_SLT:
12095 case ICmpInst::ICMP_ULT:
12096 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12097 LHS, V, getConstant(Min), CtxI))
12111 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12115 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12118 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12125bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
12126 const SCEV *&L,
const SCEV *&R,
12129 if (!AE || AE->getNumOperands() != 2)
12132 L = AE->getOperand(0);
12133 R = AE->getOperand(1);
12134 Flags = AE->getNoWrapFlags();
12138std::optional<APInt>
12145 APInt DiffMul(BW, 1);
12148 for (
unsigned I = 0;
I < 8; ++
I) {
12157 if (LAR->getLoop() != MAR->getLoop())
12158 return std::nullopt;
12162 if (!LAR->isAffine() || !MAR->isAffine())
12163 return std::nullopt;
12165 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12166 return std::nullopt;
12168 Less = LAR->getStart();
12169 More = MAR->getStart();
12174 auto MatchConstMul =
12175 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12177 if (!M || M->getNumOperands() != 2 ||
12179 return std::nullopt;
12183 if (
auto MatchedMore = MatchConstMul(More)) {
12184 if (
auto MatchedLess = MatchConstMul(
Less)) {
12185 if (MatchedMore->second == MatchedLess->second) {
12186 More = MatchedMore->first;
12187 Less = MatchedLess->first;
12188 DiffMul *= MatchedMore->second;
12199 Diff +=
C->getAPInt() * DiffMul;
12202 Diff -=
C->getAPInt() * DiffMul;
12205 Multiplicity[S] +=
Mul;
12207 auto Decompose = [&](
const SCEV *S,
int Mul) {
12214 Decompose(More, 1);
12215 Decompose(
Less, -1);
12219 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12220 for (
const auto &[S,
Mul] : Multiplicity) {
12225 return std::nullopt;
12227 }
else if (
Mul == -1) {
12229 return std::nullopt;
12232 return std::nullopt;
12236 if (NewMore == More || NewLess ==
Less)
12237 return std::nullopt;
12243 if (!More && !
Less)
12247 if (!More || !
Less)
12248 return std::nullopt;
12252 return std::nullopt;
12255bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12279 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12290 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12300bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12303 const SCEV *FoundLHS,
12304 const SCEV *FoundRHS) {
12313 if (!AddRecFoundLHS)
12320 const Loop *
L = AddRecFoundLHS->getLoop();
12321 if (L != AddRecLHS->getLoop())
12360 if (!RDiff || *LDiff != *RDiff)
12363 if (LDiff->isMinValue())
12366 APInt FoundRHSLimit;
12369 FoundRHSLimit = -(*RDiff);
12381bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12382 const SCEV *
RHS,
const SCEV *FoundLHS,
12383 const SCEV *FoundRHS,
unsigned Depth) {
12384 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12388 bool Erased = PendingMerges.erase(LPhi);
12389 assert(Erased &&
"Failed to erase LPhi!");
12393 bool Erased = PendingMerges.erase(RPhi);
12394 assert(Erased &&
"Failed to erase RPhi!");
12402 if (!PendingMerges.insert(Phi).second)
12416 if (!PendingMerges.insert(Phi).second)
12422 if (!LPhi && !RPhi)
12433 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12437 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12438 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12439 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12440 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12443 if (RPhi && RPhi->getParent() == LBB) {
12450 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12451 if (!ProvedEasily(L, R))
12462 auto *RLoop = RAR->
getLoop();
12463 auto *Predecessor = RLoop->getLoopPredecessor();
12464 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12466 if (!ProvedEasily(L1, RAR->
getStart()))
12468 auto *Latch = RLoop->getLoopLatch();
12469 assert(Latch &&
"Loop with AddRec with no latch?");
12490 if (
auto *Loop = LI.getLoopFor(LBB))
12493 if (!ProvedEasily(L,
RHS))
12500bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12503 const SCEV *FoundLHS,
12504 const SCEV *FoundRHS) {
12507 if (
RHS == FoundRHS) {
12512 if (
LHS != FoundLHS)
12519 Value *Shiftee, *ShiftValue;
12521 using namespace PatternMatch;
12522 if (
match(SUFoundRHS->getValue(),
12524 auto *ShifteeS =
getSCEV(Shiftee);
12542bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12544 const SCEV *FoundLHS,
12545 const SCEV *FoundRHS,
12546 const Instruction *CtxI) {
12547 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12549 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12551 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12552 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12554 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12558template <
typename MinMaxExprType>
12560 const SCEV *Candidate) {
12565 return is_contained(MinMaxExpr->operands(), Candidate);
12578 const SCEV *LStart, *RStart, *Step;
12628bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12630 const SCEV *FoundLHS,
12631 const SCEV *FoundRHS,
12635 "LHS and RHS have different sizes?");
12638 "FoundLHS and FoundRHS have different sizes?");
12672 auto GetOpFromSExt = [&](
const SCEV *S) {
12674 return Ext->getOperand();
12681 auto *OrigLHS =
LHS;
12682 auto *OrigFoundLHS = FoundLHS;
12683 LHS = GetOpFromSExt(
LHS);
12684 FoundLHS = GetOpFromSExt(FoundLHS);
12687 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12690 FoundRHS,
Depth + 1);
12703 if (!LHSAddExpr->hasNoSignedWrap())
12706 auto *LL = LHSAddExpr->getOperand(0);
12707 auto *LR = LHSAddExpr->getOperand(1);
12711 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12712 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12717 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12723 using namespace llvm::PatternMatch;
12742 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12750 auto *DTy = Denominator->getType();
12751 auto *FRHSTy = FoundRHS->
getType();
12752 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12771 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12782 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12784 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12792 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12825bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
12829 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
12832 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
12835bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
12838 const SCEV *FoundLHS,
12839 const SCEV *FoundRHS) {
12875 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
12881bool ScalarEvolution::isImpliedCondOperandsViaRanges(
12882 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
12883 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
12897 ConstantRange FoundLHSRange =
12901 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
12908 return LHSRange.
icmp(Pred, ConstRHS);
12911bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
12924 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
12932 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
12935bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
12947 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
12955 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
12967const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
12968 const SCEV *Stride,
12999 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13010 :
APIntOps::umax(MaxEnd, MinStart);
13017ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13018 const Loop *L,
bool IsSigned,
13019 bool ControlsOnlyExit,
bool AllowPredicates) {
13023 bool PredicatedIV =
false;
13028 auto canProveNUW = [&]() {
13031 if (!ControlsOnlyExit)
13052 Limit = Limit.
zext(OuterBitWidth);
13064 Type *Ty = ZExt->getType();
13075 if (!
IV && AllowPredicates) {
13080 PredicatedIV =
true;
13084 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13098 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13101 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13106 if (!PositiveStride) {
13158 auto wouldZeroStrideBeUB = [&]() {
13170 if (!wouldZeroStrideBeUB()) {
13174 }
else if (!NoWrap) {
13177 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13190 const SCEV *
Start =
IV->getStart();
13196 const SCEV *OrigStart =
Start;
13197 const SCEV *OrigRHS =
RHS;
13198 if (
Start->getType()->isPointerTy()) {
13209 const SCEV *End =
nullptr, *BECount =
nullptr,
13210 *BECountIfBackedgeTaken =
nullptr;
13213 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13214 RHSAddRec->getNoWrapFlags()) {
13227 const SCEV *RHSStart = RHSAddRec->getStart();
13228 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13240 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13249 BECountIfBackedgeTaken =
13254 if (BECount ==
nullptr) {
13259 const SCEV *MaxBECount = computeMaxBECountForLT(
13262 MaxBECount,
false , Predicates);
13269 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13296 const SCEV *Numerator =
13302 auto canProveRHSGreaterThanEqualStart = [&]() {
13321 auto *StartMinusOne =
13328 if (canProveRHSGreaterThanEqualStart()) {
13343 BECountIfBackedgeTaken =
13359 bool MayAddOverflow = [&] {
13405 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13419 if (!MayAddOverflow) {
13431 const SCEV *ConstantMaxBECount;
13432 bool MaxOrZero =
false;
13434 ConstantMaxBECount = BECount;
13435 }
else if (BECountIfBackedgeTaken &&
13440 ConstantMaxBECount = BECountIfBackedgeTaken;
13443 ConstantMaxBECount = computeMaxBECountForLT(
13451 const SCEV *SymbolicMaxBECount =
13453 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13457ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13458 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13459 bool ControlsOnlyExit,
bool AllowPredicates) {
13466 if (!
IV && AllowPredicates)
13473 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13477 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13490 if (!Stride->
isOne() && !NoWrap)
13491 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13494 const SCEV *
Start =
IV->getStart();
13495 const SCEV *End =
RHS;
13506 if (
Start->getType()->isPointerTy()) {
13541 const SCEV *ConstantMaxBECount =
13548 ConstantMaxBECount = BECount;
13549 const SCEV *SymbolicMaxBECount =
13552 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13558 if (
Range.isFullSet())
13563 if (!SC->getValue()->isZero()) {
13569 return ShiftedAddRec->getNumIterationsInRange(
13570 Range.subtract(SC->getAPInt()), SE);
13601 APInt ExitVal = (End +
A).udiv(
A);
13614 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13615 "Linear scev computation is off in a bad way!");
13646 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13674 Ty = Store->getValueOperand()->getType();
13676 Ty = Load->getType();
13689 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13691 SE->ConstantEvolutionLoopExitValue.erase(PN);
13692 SE->eraseValueFromMap(getValPtr());
13696void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13697 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13707 : CallbackVH(
V), SE(se) {}
13716 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13718 LoopDispositions(64), BlockDispositions(64) {
13730 F.getParent(), Intrinsic::experimental_guard);
13731 HasGuards = GuardDecl && !GuardDecl->use_empty();
13735 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13736 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13737 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13738 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13739 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13740 PendingMerges(
std::
move(Arg.PendingMerges)),
13741 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13742 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13743 PredicatedBackedgeTakenCounts(
13744 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13745 BECountUsers(
std::
move(Arg.BECountUsers)),
13746 ConstantEvolutionLoopExitValue(
13747 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13748 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13749 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13750 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13751 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13752 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13753 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13754 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13755 SignedRanges(
std::
move(Arg.SignedRanges)),
13756 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13757 UniquePreds(
std::
move(Arg.UniquePreds)),
13758 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13759 LoopUsers(
std::
move(Arg.LoopUsers)),
13760 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13761 FirstUnknown(Arg.FirstUnknown) {
13762 Arg.FirstUnknown =
nullptr;
13771 Tmp->~SCEVUnknown();
13773 FirstUnknown =
nullptr;
13775 ExprValueMap.clear();
13776 ValueExprMap.clear();
13778 BackedgeTakenCounts.clear();
13779 PredicatedBackedgeTakenCounts.clear();
13781 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13782 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13783 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13784 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13785 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13807 L->getHeader()->printAsOperand(OS,
false);
13811 L->getExitingBlocks(ExitingBlocks);
13812 if (ExitingBlocks.
size() != 1)
13813 OS <<
"<multiple exits> ";
13817 OS <<
"backedge-taken count is ";
13820 OS <<
"Unpredictable backedge-taken count.";
13823 if (ExitingBlocks.
size() > 1)
13824 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13825 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13833 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
13836 OS <<
"\n Predicates:\n";
13837 for (
const auto *
P : Predicates)
13845 L->getHeader()->printAsOperand(OS,
false);
13850 OS <<
"constant max backedge-taken count is ";
13853 OS <<
", actual taken count either this or zero.";
13855 OS <<
"Unpredictable constant max backedge-taken count. ";
13860 L->getHeader()->printAsOperand(OS,
false);
13865 OS <<
"symbolic max backedge-taken count is ";
13868 OS <<
", actual taken count either this or zero.";
13870 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13874 if (ExitingBlocks.
size() > 1)
13875 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13876 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13886 OS <<
"\n predicated symbolic max exit count for "
13887 << ExitingBlock->
getName() <<
": ";
13889 OS <<
"\n Predicates:\n";
13890 for (
const auto *
P : Predicates)
13900 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
13902 L->getHeader()->printAsOperand(OS,
false);
13905 OS <<
"Predicated backedge-taken count is ";
13908 OS <<
"Unpredictable predicated backedge-taken count.";
13910 OS <<
" Predicates:\n";
13911 for (
const auto *
P : Preds)
13916 auto *PredConstantMax =
13918 if (PredConstantMax != ConstantBTC) {
13920 "different predicated constant max BTC but no predicates");
13922 L->getHeader()->printAsOperand(OS,
false);
13925 OS <<
"Predicated constant max backedge-taken count is ";
13928 OS <<
"Unpredictable predicated constant max backedge-taken count.";
13930 OS <<
" Predicates:\n";
13931 for (
const auto *
P : Preds)
13936 auto *PredSymbolicMax =
13938 if (SymbolicBTC != PredSymbolicMax) {
13940 "Different predicated symbolic max BTC, but no predicates");
13942 L->getHeader()->printAsOperand(OS,
false);
13945 OS <<
"Predicated symbolic max backedge-taken count is ";
13948 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
13950 OS <<
" Predicates:\n";
13951 for (
const auto *
P : Preds)
13957 L->getHeader()->printAsOperand(OS,
false);
13973 OS <<
"Computable";
13982 OS <<
"DoesNotDominate";
13988 OS <<
"ProperlyDominates";
14005 OS <<
"Classifying expressions for: ";
14006 F.printAsOperand(OS,
false);
14021 const Loop *L = LI.getLoopFor(
I.getParent());
14036 OS <<
"\t\t" "Exits: ";
14039 OS <<
"<<Unknown>>";
14045 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14047 OS <<
"\t\t" "LoopDispositions: { ";
14053 Iter->getHeader()->printAsOperand(OS,
false);
14061 OS <<
"\t\t" "LoopDispositions: { ";
14067 InnerL->getHeader()->printAsOperand(OS,
false);
14078 OS <<
"Determining loop execution counts for: ";
14079 F.printAsOperand(OS,
false);
14087 auto &Values = LoopDispositions[S];
14088 for (
auto &V : Values) {
14089 if (V.getPointer() == L)
14094 auto &Values2 = LoopDispositions[S];
14096 if (V.getPointer() == L) {
14105ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14124 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14125 " dominate the contained loop's header?");
14152 bool HasVarying =
false;
14186 auto &Values = BlockDispositions[S];
14187 for (
auto &V : Values) {
14188 if (V.getPointer() == BB)
14193 auto &Values2 = BlockDispositions[S];
14195 if (V.getPointer() == BB) {
14204ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14233 bool Proper =
true;
14244 if (Instruction *
I =
14246 if (
I->getParent() == BB)
14248 if (DT.properlyDominates(
I->getParent(), BB))
14271void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14274 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14275 auto It = BECounts.find(L);
14276 if (It != BECounts.end()) {
14277 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14278 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14280 auto UserIt = BECountUsers.find(S);
14281 assert(UserIt != BECountUsers.end());
14286 BECounts.erase(It);
14294 while (!Worklist.
empty()) {
14296 auto Users = SCEVUsers.find(Curr);
14297 if (
Users != SCEVUsers.end())
14298 for (
const auto *User :
Users->second)
14299 if (ToForget.
insert(User).second)
14303 for (
const auto *S : ToForget)
14304 forgetMemoizedResultsImpl(S);
14306 for (
auto I = PredicatedSCEVRewrites.begin();
14307 I != PredicatedSCEVRewrites.end();) {
14308 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14309 if (ToForget.count(
Entry.first))
14310 PredicatedSCEVRewrites.erase(
I++);
14316void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14317 LoopDispositions.erase(S);
14318 BlockDispositions.erase(S);
14319 UnsignedRanges.erase(S);
14320 SignedRanges.erase(S);
14321 HasRecMap.erase(S);
14322 ConstantMultipleCache.erase(S);
14325 UnsignedWrapViaInductionTried.erase(AR);
14326 SignedWrapViaInductionTried.erase(AR);
14329 auto ExprIt = ExprValueMap.find(S);
14330 if (ExprIt != ExprValueMap.end()) {
14331 for (
Value *V : ExprIt->second) {
14332 auto ValueIt = ValueExprMap.find_as(V);
14333 if (ValueIt != ValueExprMap.end())
14334 ValueExprMap.erase(ValueIt);
14336 ExprValueMap.erase(ExprIt);
14339 auto ScopeIt = ValuesAtScopes.find(S);
14340 if (ScopeIt != ValuesAtScopes.end()) {
14341 for (
const auto &Pair : ScopeIt->second)
14344 std::make_pair(Pair.first, S));
14345 ValuesAtScopes.erase(ScopeIt);
14348 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14349 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14350 for (
const auto &Pair : ScopeUserIt->second)
14351 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14352 ValuesAtScopesUsers.erase(ScopeUserIt);
14355 auto BEUsersIt = BECountUsers.find(S);
14356 if (BEUsersIt != BECountUsers.end()) {
14358 auto Copy = BEUsersIt->second;
14359 for (
const auto &Pair : Copy)
14360 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14361 BECountUsers.erase(BEUsersIt);
14364 auto FoldUser = FoldCacheUser.find(S);
14365 if (FoldUser != FoldCacheUser.end())
14366 for (
auto &KV : FoldUser->second)
14367 FoldCache.erase(KV);
14368 FoldCacheUser.erase(S);
14372ScalarEvolution::getUsedLoops(
const SCEV *S,
14374 struct FindUsedLoops {
14375 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14376 : LoopsUsed(LoopsUsed) {}
14377 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14378 bool follow(
const SCEV *S) {
14384 bool isDone()
const {
return false; }
14387 FindUsedLoops
F(LoopsUsed);
14388 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14391void ScalarEvolution::getReachableBlocks(
14394 Worklist.
push_back(&F.getEntryBlock());
14395 while (!Worklist.
empty()) {
14397 if (!Reachable.
insert(BB).second)
14405 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14412 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14416 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14451 SCEVMapper SCM(SE2);
14453 SE2.getReachableBlocks(ReachableBlocks, F);
14455 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14473 while (!LoopStack.
empty()) {
14479 if (!ReachableBlocks.
contains(L->getHeader()))
14484 auto It = BackedgeTakenCounts.find(L);
14485 if (It == BackedgeTakenCounts.end())
14489 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14509 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14510 if (Delta && !Delta->
isZero()) {
14511 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14512 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14513 dbgs() <<
"New: " << *NewBECount <<
"\n";
14514 dbgs() <<
"Delta: " << *Delta <<
"\n";
14522 while (!Worklist.
empty()) {
14524 if (ValidLoops.
insert(L).second)
14525 Worklist.
append(L->begin(), L->end());
14527 for (
const auto &KV : ValueExprMap) {
14532 "AddRec references invalid loop");
14537 auto It = ExprValueMap.find(KV.second);
14538 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14539 dbgs() <<
"Value " << *KV.first
14540 <<
" is in ValueExprMap but not in ExprValueMap\n";
14545 if (!ReachableBlocks.
contains(
I->getParent()))
14547 const SCEV *OldSCEV = SCM.visit(KV.second);
14549 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14550 if (Delta && !Delta->
isZero()) {
14551 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14552 <<
"Old: " << *OldSCEV <<
"\n"
14553 <<
"New: " << *NewSCEV <<
"\n"
14554 <<
"Delta: " << *Delta <<
"\n";
14560 for (
const auto &KV : ExprValueMap) {
14561 for (
Value *V : KV.second) {
14562 const SCEV *S = ValueExprMap.lookup(V);
14564 dbgs() <<
"Value " << *V
14565 <<
" is in ExprValueMap but not in ValueExprMap\n";
14568 if (S != KV.first) {
14569 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14570 << *KV.first <<
"\n";
14577 for (
const auto &S : UniqueSCEVs) {
14582 auto It = SCEVUsers.find(
Op);
14583 if (It != SCEVUsers.end() && It->second.count(&S))
14585 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14586 <<
" is not being tracked!\n";
14592 for (
const auto &ValueAndVec : ValuesAtScopes) {
14594 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14595 const Loop *L = LoopAndValueAtScope.first;
14596 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14598 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14599 if (It != ValuesAtScopesUsers.end() &&
14602 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14603 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14609 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14610 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14611 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14612 const Loop *L = LoopAndValue.first;
14613 const SCEV *
Value = LoopAndValue.second;
14615 auto It = ValuesAtScopes.find(
Value);
14616 if (It != ValuesAtScopes.end() &&
14617 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14619 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14620 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14626 auto VerifyBECountUsers = [&](
bool Predicated) {
14628 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14629 for (
const auto &LoopAndBEInfo : BECounts) {
14630 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14631 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14633 auto UserIt = BECountUsers.find(S);
14634 if (UserIt != BECountUsers.end() &&
14635 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14637 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14638 <<
" missing from BECountUsers\n";
14645 VerifyBECountUsers(
false);
14646 VerifyBECountUsers(
true);
14649 for (
auto &[S, Values] : LoopDispositions) {
14650 for (
auto [
Loop, CachedDisposition] : Values) {
14652 if (CachedDisposition != RecomputedDisposition) {
14653 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14654 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14655 << RecomputedDisposition <<
"\n";
14662 for (
auto &[S, Values] : BlockDispositions) {
14663 for (
auto [BB, CachedDisposition] : Values) {
14665 if (CachedDisposition != RecomputedDisposition) {
14666 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14667 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14668 <<
", actual " << RecomputedDisposition <<
"\n";
14675 for (
auto [
FoldID, Expr] : FoldCache) {
14676 auto I = FoldCacheUser.find(Expr);
14677 if (
I == FoldCacheUser.end()) {
14678 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14683 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14687 for (
auto [Expr, IDs] : FoldCacheUser) {
14688 for (
auto &
FoldID : IDs) {
14691 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14696 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14697 <<
" != " << *Expr <<
"!\n";
14708 for (
auto [S, Multiple] : ConstantMultipleCache) {
14710 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14711 Multiple.
urem(RecomputedMultiple) != 0 &&
14712 RecomputedMultiple.
urem(Multiple) != 0)) {
14713 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14714 << *S <<
" : Computed " << RecomputedMultiple
14715 <<
" but cache contains " << Multiple <<
"!\n";
14723 FunctionAnalysisManager::Invalidator &Inv) {
14755 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14756 <<
F.getName() <<
"':\n";
14762 "Scalar Evolution Analysis",
false,
true)
14811 const SCEV *LHS,
const SCEV *RHS) {
14813 assert(LHS->getType() == RHS->getType() &&
14814 "Type mismatch between LHS and RHS");
14817 ID.AddInteger(Pred);
14818 ID.AddPointer(LHS);
14819 ID.AddPointer(RHS);
14820 void *IP =
nullptr;
14821 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14825 UniquePreds.InsertNode(Eq, IP);
14836 ID.AddInteger(AddedFlags);
14837 void *IP =
nullptr;
14838 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14840 auto *OF =
new (SCEVAllocator)
14842 UniquePreds.InsertNode(OF, IP);
14862 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14863 return Rewriter.visit(S);
14869 for (
const auto *Pred : U->getPredicates())
14871 if (IPred->getLHS() == Expr &&
14873 return IPred->getRHS();
14875 if (IPred->getLHS() == Expr &&
14876 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14877 return IPred->getRHS();
14880 return convertToAddRecWithPreds(Expr);
14883 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
14899 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
14916 explicit SCEVPredicateRewriter(
14917 const Loop *L, ScalarEvolution &SE,
14918 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
14919 const SCEVPredicate *Pred)
14920 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
14922 bool addOverflowAssumption(
const SCEVPredicate *
P) {
14925 return Pred && Pred->
implies(
P, SE);
14931 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
14934 return addOverflowAssumption(
A);
14943 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
14947 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
14949 if (!PredicatedRewrite)
14951 for (
const auto *
P : PredicatedRewrite->second){
14954 if (L != WP->getExpr()->getLoop())
14957 if (!addOverflowAssumption(
P))
14960 return PredicatedRewrite->first;
14963 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
14964 const SCEVPredicate *Pred;
14973 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
14980 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15000 if (!Step->
isOne())
15025 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15026 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15039 return Op->LHS == LHS &&
Op->RHS == RHS;
15046 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15048 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15073 const SCEV *Start = AR->getStart();
15074 const SCEV *OpStart =
Op->AR->getStart();
15079 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15082 const SCEV *Step = AR->getStepRecurrence(SE);
15083 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15136 if (Step->getValue()->getValue().isNonNegative())
15140 return ImpliedFlags;
15147 for (
const auto *
P : Preds)
15160 return this->implies(I, SE);
15168 for (
const auto *Pred : Preds)
15169 Pred->print(OS,
Depth);
15174 for (
const auto *Pred : Set->Preds)
15186 for (
auto *
P : Preds) {
15187 if (
N->implies(
P, SE))
15191 Preds = std::move(PrunedPreds);
15192 Preds.push_back(
N);
15199 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15204 for (
const auto *
Op :
Ops)
15209 SCEVUsers[
Op].insert(
User);
15213 const SCEV *Expr = SE.getSCEV(V);
15214 RewriteEntry &Entry = RewriteMap[Expr];
15217 if (Entry.second && Generation == Entry.first)
15218 return Entry.second;
15223 Expr = Entry.second;
15225 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15226 Entry = {Generation, NewSCEV};
15232 if (!BackedgeCount) {
15234 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15235 for (
const auto *
P : Preds)
15238 return BackedgeCount;
15242 if (!SymbolicMaxBackedgeCount) {
15244 SymbolicMaxBackedgeCount =
15245 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15246 for (
const auto *
P : Preds)
15249 return SymbolicMaxBackedgeCount;
15253 if (!SmallConstantMaxTripCount) {
15255 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15256 for (
const auto *
P : Preds)
15259 return *SmallConstantMaxTripCount;
15263 if (Preds->implies(&Pred, SE))
15268 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15269 updateGeneration();
15276void PredicatedScalarEvolution::updateGeneration() {
15278 if (++Generation == 0) {
15279 for (
auto &
II : RewriteMap) {
15280 const SCEV *Rewritten =
II.second.second;
15297 auto II = FlagsMap.insert({V, Flags});
15310 auto II = FlagsMap.find(V);
15312 if (
II != FlagsMap.end())
15321 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15326 for (
const auto *
P : NewPreds)
15329 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15335 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15338 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15339 for (
auto I :
Init.FlagsMap)
15340 FlagsMap.insert(
I);
15345 for (
auto *BB : L.getBlocks())
15346 for (
auto &
I : *BB) {
15347 if (!SE.isSCEVable(
I.getType()))
15350 auto *Expr = SE.getSCEV(&
I);
15351 auto II = RewriteMap.find(Expr);
15353 if (
II == RewriteMap.end())
15357 if (
II->second.second == Expr)
15362 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15371bool ScalarEvolution::matchURem(
const SCEV *Expr,
const SCEV *&LHS,
15372 const SCEV *&RHS) {
15381 LHS = Trunc->getOperand();
15387 if (LHS->getType() != Expr->
getType())
15394 if (
Add ==
nullptr ||
Add->getNumOperands() != 2)
15397 const SCEV *
A =
Add->getOperand(1);
15400 if (
Mul ==
nullptr)
15403 const auto MatchURemWithDivisor = [&](
const SCEV *
B) {
15415 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
15416 MatchURemWithDivisor(
Mul->getOperand(2));
15419 if (
Mul->getNumOperands() == 2)
15420 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
15421 MatchURemWithDivisor(
Mul->getOperand(0)) ||
15431 LoopGuards Guards(SE);
15435 collectFromBlock(SE, Guards, Header, Pred, VisitedBlocks);
15439void ScalarEvolution::LoopGuards::collectFromPHI(
15447 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15448 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15454 collectFromBlock(SE,
G->second, Phi.getParent(),
InBlock, VisitedBlocks,
15456 auto &RewriteMap =
G->second.RewriteMap;
15457 if (RewriteMap.empty())
15459 auto S = RewriteMap.find(SE.
getSCEV(Phi.getIncomingValue(IncomingIdx)));
15460 if (S == RewriteMap.end())
15466 return {C0, SM->getSCEVType()};
15469 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15470 MinMaxPattern P2) -> MinMaxPattern {
15471 auto [C1,
T1] =
P1;
15472 auto [C2, T2] = P2;
15473 if (!C1 || !C2 ||
T1 != T2)
15477 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15479 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15481 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15483 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15488 auto P = GetMinMaxConst(0);
15489 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15492 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15495 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15498 Guards.RewriteMap.insert({
LHS,
RHS});
15502void ScalarEvolution::LoopGuards::collectFromBlock(
15504 const BasicBlock *
Block,
const BasicBlock *Pred,
15505 SmallPtrSetImpl<const BasicBlock *> &VisitedBlocks,
unsigned Depth) {
15509 DenseMap<const SCEV *, const SCEV *>
15526 &ExprsToRewrite]() {
15527 const SCEVConstant *C1;
15540 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15542 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15543 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15551 if (MatchRangeCheckIdiom())
15557 auto IsMinMaxSCEVWithNonNegativeConstant =
15558 [&](
const SCEV *Expr,
SCEVTypes &SCTy,
const SCEV *&
LHS,
15559 const SCEV *&
RHS) {
15561 if (MinMax->getNumOperands() != 2)
15564 if (
C->getAPInt().isNegative())
15566 SCTy = MinMax->getSCEVType();
15567 LHS = MinMax->getOperand(0);
15568 RHS = MinMax->getOperand(1);
15577 auto GetNonNegExprAndPosDivisor = [&](
const SCEV *Expr,
const SCEV *Divisor,
15578 APInt &ExprVal, APInt &DivisorVal) {
15581 if (!ConstExpr || !ConstDivisor)
15583 ExprVal = ConstExpr->getAPInt();
15584 DivisorVal = ConstDivisor->getAPInt();
15585 return ExprVal.isNonNegative() && !DivisorVal.isNonPositive();
15591 auto GetNextSCEVDividesByDivisor = [&](
const SCEV *Expr,
15592 const SCEV *Divisor) {
15595 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15597 APInt Rem = ExprVal.
urem(DivisorVal);
15600 return SE.
getConstant(ExprVal + DivisorVal - Rem);
15607 auto GetPreviousSCEVDividesByDivisor = [&](
const SCEV *Expr,
15608 const SCEV *Divisor) {
15611 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15613 APInt Rem = ExprVal.
urem(DivisorVal);
15621 std::function<
const SCEV *(
const SCEV *,
const SCEV *)>
15622 ApplyDivisibiltyOnMinMaxExpr = [&](
const SCEV *MinMaxExpr,
15623 const SCEV *Divisor) {
15624 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15626 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15632 "Expected non-negative operand!");
15633 auto *DivisibleExpr =
15634 IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
15635 : GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
15637 ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
15646 const SCEV *URemLHS =
nullptr;
15647 const SCEV *URemRHS =
nullptr;
15648 if (SE.matchURem(
LHS, URemLHS, URemRHS)) {
15650 auto I = RewriteMap.find(LHSUnknown);
15651 const SCEV *RewrittenLHS =
15652 I != RewriteMap.end() ?
I->second : LHSUnknown;
15653 RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
15654 const auto *Multiple =
15656 RewriteMap[LHSUnknown] = Multiple;
15677 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15679 if (From == FromRewritten)
15681 RewriteMap[From] = To;
15687 auto GetMaybeRewritten = [&](
const SCEV *S) {
15688 return RewriteMap.lookup_or(S, S);
15698 std::function<bool(
const SCEV *,
const SCEV *&)> HasDivisibiltyInfo =
15699 [&](
const SCEV *Expr,
const SCEV *&DividesBy) {
15701 if (
Mul->getNumOperands() != 2)
15703 auto *MulLHS =
Mul->getOperand(0);
15704 auto *MulRHS =
Mul->getOperand(1);
15708 if (Div->getOperand(1) == MulRHS) {
15709 DividesBy = MulRHS;
15714 return HasDivisibiltyInfo(MinMax->getOperand(0), DividesBy) ||
15715 HasDivisibiltyInfo(MinMax->getOperand(1), DividesBy);
15720 std::function<bool(
const SCEV *,
const SCEV *&)> IsKnownToDivideBy =
15721 [&](
const SCEV *Expr,
const SCEV *DividesBy) {
15725 return IsKnownToDivideBy(MinMax->getOperand(0), DividesBy) &&
15726 IsKnownToDivideBy(MinMax->getOperand(1), DividesBy);
15730 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15731 const SCEV *DividesBy =
nullptr;
15732 if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
15735 IsKnownToDivideBy(RewrittenLHS, DividesBy) ? DividesBy :
nullptr;
15749 switch (Predicate) {
15757 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15763 RHS = DividesBy ? GetNextSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15767 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15771 RHS = DividesBy ? GetNextSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15778 SmallPtrSet<const SCEV *, 16> Visited;
15780 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15784 while (!Worklist.
empty()) {
15788 if (!Visited.
insert(From).second)
15790 const SCEV *FromRewritten = GetMaybeRewritten(From);
15791 const SCEV *To =
nullptr;
15793 switch (Predicate) {
15798 EnqueueOperands(
UMax);
15804 EnqueueOperands(
SMax);
15810 EnqueueOperands(
UMin);
15816 EnqueueOperands(
SMin);
15824 const SCEV *OneAlignedUp =
15825 DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
15826 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15834 AddRewrite(From, FromRewritten, To);
15851 SE.F.
getParent(), Intrinsic::experimental_guard);
15853 for (
const auto *GU : GuardDecl->users())
15855 if (Guard->getFunction() ==
Block->getParent() &&
15864 unsigned NumCollectedConditions = 0;
15866 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15868 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15869 VisitedBlocks.
insert(Pair.second);
15870 const BranchInst *LoopEntryPredicate =
15877 NumCollectedConditions++;
15881 if (
Depth > 0 && NumCollectedConditions == 2)
15889 if (Pair.second->hasNPredecessorsOrMore(2) &&
15891 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
15892 for (
auto &Phi : Pair.second->phis())
15893 collectFromPHI(SE, Guards, Phi, VisitedBlocks, IncomingGuards,
Depth);
15900 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15901 SmallVector<Value *, 8> Worklist;
15902 SmallPtrSet<Value *, 8> Visited;
15904 while (!Worklist.
empty()) {
15911 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
15914 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap);
15930 Guards.PreserveNUW =
true;
15931 Guards.PreserveNSW =
true;
15932 for (
const SCEV *Expr : ExprsToRewrite) {
15933 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15934 Guards.PreserveNUW &=
15936 Guards.PreserveNSW &=
15943 if (ExprsToRewrite.size() > 1) {
15944 for (
const SCEV *Expr : ExprsToRewrite) {
15945 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15946 Guards.RewriteMap.erase(Expr);
15947 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
15956 class SCEVLoopGuardRewriter
15966 if (Guards.PreserveNUW)
15968 if (Guards.PreserveNSW)
15975 return Map.lookup_or(Expr, Expr);
15979 if (
const SCEV *S = Map.lookup(Expr))
15986 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
15987 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
15988 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
15990 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
15991 if (
const SCEV *S = Map.lookup(NarrowExt))
15992 return SE.getZeroExtendExpr(S, Ty);
15993 Bitwidth = Bitwidth / 2;
16001 if (
const SCEV *S = Map.lookup(Expr))
16008 if (
const SCEV *S = Map.lookup(Expr))
16014 if (
const SCEV *S = Map.lookup(Expr))
16026 if (
const SCEV *S = Map.lookup(
16028 return SE.getAddExpr(Expr->
getOperand(0), S);
16062 if (RewriteMap.empty())
16065 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16066 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool isSigned(unsigned int Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
mir Rename Register Operands
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< const SCEV * > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool CollectAddOperandsWithScales(SmallDenseMap< const SCEV *, APInt, 16 > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred ALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V, bool HasCoroSuspendInst=false) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
const SCEV * getStart() const
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, const SmallVectorImpl< const SCEV * > &IndexExprs)
Returns an expression for a GEP.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI APInt getConstantMultiple(const SCEV *S)
Returns the max constant multiple of S.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Does the operation BinOp between LHS and RHS provably not have a signed/unsigned overflow (as selected by Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth=0)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimized out and is now a nullptr.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
class_match< const SCEV > m_SCEV()
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff every element of A is contained in B (A is a subset of B).
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to the not-taken path.
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken