#define DEBUG_TYPE "constraint-elimination"

STATISTIC(NumCondsRemoved, "Number of instructions removed");
DEBUG_COUNTER(EliminatedCounter, "conds-eliminated",
              "Controls which conditions are eliminated");

static cl::opt<unsigned>
    MaxRows("constraint-elimination-max-rows", cl::init(500), cl::Hidden,
            cl::desc("Maximum number of rows to keep in constraint system"));

static cl::opt<bool> DumpReproducers(
    "constraint-elimination-dump-reproducers", cl::init(false), cl::Hidden,
    cl::desc("Dump IR to reproduce successful transformations."));
static Instruction *getContextInstForUse(Use &U) {
  Instruction *UserI = cast<Instruction>(U.getUser());
  if (auto *Phi = dyn_cast<PHINode>(UserI))
    UserI = Phi->getIncomingBlock(U)->getTerminator();
  return UserI;
}

  ConditionTy(CmpPredicate Pred, Value *Op0, Value *Op1)
      : Pred(Pred), Op0(Op0), Op1(Op1) {}

  FactOrCheck(EntryTy Ty, DomTreeNode *DTN, Instruction *Inst)
      : Inst(Inst), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
        Ty(Ty) {}

  FactOrCheck(DomTreeNode *DTN, Use *U)
      : U(U), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
        Ty(EntryTy::UseCheck) {}

  FactOrCheck(DomTreeNode *DTN, CmpPredicate Pred, Value *Op0, Value *Op1,
              ConditionTy Precond)
      : Cond(Pred, Op0, Op1), DoesHold(Precond), NumIn(DTN->getDFSNumIn()),
        NumOut(DTN->getDFSNumOut()), Ty(EntryTy::ConditionFact) {}

  static FactOrCheck getConditionFact(DomTreeNode *DTN, CmpPredicate Pred,
                                      Value *Op0, Value *Op1,
                                      ConditionTy Precond = {}) {
    return FactOrCheck(DTN, Pred, Op0, Op1, Precond);
  }

  static FactOrCheck getInstFact(DomTreeNode *DTN, Instruction *Inst) {
    return FactOrCheck(EntryTy::InstFact, DTN, Inst);
  }

  static FactOrCheck getCheck(DomTreeNode *DTN, Use *U) {
    return FactOrCheck(DTN, U);
  }

  static FactOrCheck getCheck(DomTreeNode *DTN, CallInst *CI) {
    return FactOrCheck(EntryTy::InstCheck, DTN, CI);
  }

  bool isCheck() const {
    return Ty == EntryTy::InstCheck || Ty == EntryTy::UseCheck;
  }

  Instruction *getContextInst() const {
    assert(!isConditionFact());
    if (Ty == EntryTy::UseCheck)
      return getContextInstForUse(*U);
    return Inst;
  }

  Instruction *getInstructionToSimplify() const {
    assert(isCheck());
    if (Ty == EntryTy::InstCheck)
      return Inst;
    // For a use check, the used value may already have been simplified.
    return dyn_cast<Instruction>(U->get());
  }

  bool isConditionFact() const { return Ty == EntryTy::ConditionFact; }
  TargetLibraryInfo &TLI;

  State(DominatorTree &DT, LoopInfo &LI, ScalarEvolution &SE,
        TargetLibraryInfo &TLI)
      : DT(DT), LI(LI), SE(SE), TLI(TLI) {}

  void addInfoFor(BasicBlock &BB);

  void addInfoForInductions(BasicBlock &BB);

  bool canAddSuccessor(BasicBlock &BB, BasicBlock *Succ) const {
    return DT.dominates(BasicBlockEdge(&BB, Succ), Succ);
  }
  bool IsSigned = false;

  SmallVector<Value *, 2> ValuesToRelease;

  StackEntry(unsigned NumIn, unsigned NumOut, bool IsSigned,
             SmallVector<Value *, 2> ValuesToRelease)
      : NumIn(NumIn), NumOut(NumOut), IsSigned(IsSigned),
        ValuesToRelease(std::move(ValuesToRelease)) {}
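// Illustrative sketch (not part of the pass): a StackEntry scopes a fact by
// the dominator-tree DFS interval [NumIn, NumOut] of the point it was added
// at. The fact still applies at a later point with DFS numbers (In, Out) iff
// that interval is nested inside the entry's; the main loop pops entries as
// soon as this stops holding.
static bool sketchStillDominates(unsigned FactNumIn, unsigned FactNumOut,
                                 unsigned In, unsigned Out) {
  return FactNumIn <= In && Out <= FactNumOut;
}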
  SmallVector<ConditionTy, 2> Preconditions;

  bool IsSigned = false;

  ConstraintTy() = default;

  ConstraintTy(SmallVector<int64_t, 8> Coefficients, bool IsSigned, bool IsEq,
               bool IsNe)
      : Coefficients(std::move(Coefficients)), IsSigned(IsSigned), IsEq(IsEq),
        IsNe(IsNe) {}

  unsigned size() const { return Coefficients.size(); }

  unsigned empty() const { return Coefficients.empty(); }

  bool isEq() const { return IsEq; }

  bool isNe() const { return IsNe; }

  std::optional<bool> isImpliedBy(const ConstraintSystem &CS) const;
class ConstraintInfo {

  ConstraintSystem UnsignedCS;
  ConstraintSystem SignedCS;

  const DataLayout &DL;

public:
  ConstraintInfo(const DataLayout &DL, ArrayRef<Value *> FunctionArgs)
      : UnsignedCS(FunctionArgs), SignedCS(FunctionArgs), DL(DL) {
    auto &Value2Index = getValue2Index(false);
    // Add Arg >= 0 rows to the unsigned system for all function arguments.
    for (Value *Arg : FunctionArgs) {
      ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0),
                          false, false, false);
      VarPos.Coefficients[Value2Index[Arg]] = -1;
      UnsignedCS.addVariableRow(VarPos.Coefficients);
    }
  }
  DenseMap<Value *, unsigned> &getValue2Index(bool Signed) {
    return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index();
  }
  const DenseMap<Value *, unsigned> &getValue2Index(bool Signed) const {
    return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index();
  }

  ConstraintSystem &getCS(bool Signed) {
    return Signed ? SignedCS : UnsignedCS;
  }
  const ConstraintSystem &getCS(bool Signed) const {
    return Signed ? SignedCS : UnsignedCS;
  }

  void popLastConstraint(bool Signed) { getCS(Signed).popLastConstraint(); }
  void popLastNVariables(bool Signed, unsigned N) {
    getCS(Signed).popLastNVariables(N);
  }
  void addFact(CmpPredicate Pred, Value *A, Value *B, unsigned NumIn,
               unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack);

  ConstraintTy getConstraint(CmpPredicate Pred, Value *A, Value *B,
                             SmallVectorImpl<Value *> &NewVariables,
                             bool ForceSignedSystem = false) const;

  void transferToOtherSystem(CmpPredicate Pred, Value *A, Value *B,
                             unsigned NumIn, unsigned NumOut,
                             SmallVectorImpl<StackEntry> &DFSInStack);

  void addFactImpl(CmpPredicate Pred, Value *A, Value *B, unsigned NumIn,
                   unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack,
                   bool ForceSignedSystem);
  bool IsKnownNonNegative;

  DecompEntry(int64_t Coefficient, Value *Variable,
              bool IsKnownNonNegative = false)
      : Coefficient(Coefficient), Variable(Variable),
        IsKnownNonNegative(IsKnownNonNegative) {}
struct Decomposition {

  Decomposition(int64_t Offset) : Offset(Offset) {}
  Decomposition(Value *V, bool IsKnownNonNegative = false) {
    Vars.emplace_back(1, V, IsKnownNonNegative);
  }
  Decomposition(int64_t Offset, ArrayRef<DecompEntry> Vars)
      : Offset(Offset), Vars(Vars) {}

  /// Returns true if the decomposition overflowed.
  [[nodiscard]] bool add(int64_t OtherOffset) {
    return AddOverflow(Offset, OtherOffset, Offset);
  }

  /// Returns true if the decomposition overflowed.
  [[nodiscard]] bool add(const Decomposition &Other) {
    if (add(Other.Offset))
      return true;
    append_range(Vars, Other.Vars);
    return false;
  }

  /// Returns true if the decomposition overflowed.
  [[nodiscard]] bool sub(const Decomposition &Other) {
    Decomposition Tmp = Other;
    if (Tmp.mul(-1))
      return true;
    return add(Tmp);
  }

  /// Returns true if the decomposition overflowed.
  [[nodiscard]] bool mul(int64_t Factor) {
    if (MulOverflow(Offset, Factor, Offset))
      return true;
    for (auto &Var : Vars)
      if (MulOverflow(Var.Coefficient, Factor, Var.Coefficient))
        return true;
    return false;
  }
};
  APInt ConstantOffset;
  SmallMapVector<Value *, APInt, 4> VariableOffsets;

  OffsetResult() : BasePtr(nullptr), ConstantOffset(0, uint64_t(0)) {}

  OffsetResult(GEPOperator &GEP, const DataLayout &DL)
      : BasePtr(GEP.getPointerOperand()), NW(GEP.getNoWrapFlags()) {
    ConstantOffset = APInt(DL.getIndexTypeSizeInBits(BasePtr->getType()), 0);
  }
};

static OffsetResult collectOffsets(GEPOperator &GEP, const DataLayout &DL) {
  OffsetResult Result(GEP, DL);
  unsigned BitWidth = Result.ConstantOffset.getBitWidth();
  if (!GEP.collectOffset(DL, BitWidth, Result.VariableOffsets,
                         Result.ConstantOffset))
    return {};

  // Try to fold the offsets of a nested inner GEP into the result.
  if (auto *InnerGEP = dyn_cast<GEPOperator>(Result.BasePtr)) {
    SmallMapVector<Value *, APInt, 4> VariableOffsets2;
    APInt ConstantOffset2(BitWidth, 0);
    bool CanCollectInner = InnerGEP->collectOffset(
        DL, BitWidth, VariableOffsets2, ConstantOffset2);
    // Bail out if the inner offsets cannot be collected or the combined
    // result would need more than one variable index.
    if (!CanCollectInner || Result.VariableOffsets.size() > 1 ||
        VariableOffsets2.size() > 1 ||
        (Result.VariableOffsets.size() >= 1 && VariableOffsets2.size() >= 1))
      return Result;
    Result.BasePtr = InnerGEP->getPointerOperand();
    Result.ConstantOffset += ConstantOffset2;
    if (Result.VariableOffsets.size() == 0 && VariableOffsets2.size() == 1)
      Result.VariableOffsets = VariableOffsets2;
    Result.NW &= InnerGEP->getNoWrapFlags();
  }
  return Result;
}
static Decomposition decomposeGEP(GEPOperator &GEP,
                                  SmallVectorImpl<ConditionTy> &Preconditions,
                                  bool IsSigned, const DataLayout &DL) {
  // Do not reason about pointers whose index width is wider than 64 bits, as
  // constraint coefficients are 64-bit integers.
  if (DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()) > 64)
    return &GEP;

  assert(!IsSigned && "The logic below only supports decomposition for "
                      "unsigned predicates at the moment.");
  const auto &[BasePtr, ConstantOffset, VariableOffsets, NW] =
      collectOffsets(GEP, DL);
  if (!BasePtr || NW == GEPNoWrapFlags::none())
    return &GEP;

  Decomposition Result(ConstantOffset.getSExtValue(), DecompEntry(1, BasePtr));
  for (auto [Index, Scale] : VariableOffsets) {
    auto IdxResult = decompose(Index, Preconditions, IsSigned, DL);
    if (IdxResult.mul(Scale.getSExtValue()))
      return &GEP;
    if (Result.add(IdxResult))
      return &GEP;

    if (!NW.hasNoUnsignedWrap()) {
      // Try to prove nuw from nusw together with a non-negative index.
      assert(NW.hasNoUnsignedSignedWrap() && "Must have nusw flag");
      Preconditions.emplace_back(CmpInst::ICMP_SGE, Index,
                                 ConstantInt::get(Index->getType(), 0));
    }
  }
  return Result;
}
  auto MergeResults = [&Preconditions, IsSigned,
                       &DL](Value *A, Value *B,
                            bool IsSignedB) -> std::optional<Decomposition> {
    auto ResA = decompose(A, Preconditions, IsSigned, DL);
    auto ResB = decompose(B, Preconditions, IsSignedB, DL);
    if (ResA.add(ResB))
      return std::nullopt;
    return ResA;
  };

  if (Ty->isPointerTy() && !IsSigned) {

  if (!Ty->isIntegerTy() || Ty->getIntegerBitWidth() > 64)

  bool IsKnownNonNegative = false;

      return CI->getSExtValue();

    IsKnownNonNegative = true;

    if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
      return *Decomp;
    return {V, IsKnownNonNegative};

    auto ResA = decompose(Op0, Preconditions, IsSigned, DL);
    auto ResB = decompose(Op1, Preconditions, IsSigned, DL);
    return {V, IsKnownNonNegative};

    auto Result = decompose(Op0, Preconditions, IsSigned, DL);
    return {V, IsKnownNonNegative};

  // shl by a constant amount: scale the decomposition by 1 << Shift.
  if (Shift < Ty->getIntegerBitWidth() - 1) {
    assert(Shift < 64 && "Would overflow");
    auto Result = decompose(Op0, Preconditions, IsSigned, DL);
    if (!Result.mul(int64_t(1) << Shift))
      return Result;
    return {V, IsKnownNonNegative};
  }
  return {V, IsKnownNonNegative};

    return int64_t(CI->getZExtValue());

    IsKnownNonNegative = true;

    Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
                               ConstantInt::get(Op0->getType(), 0));

  if (Trunc->getSrcTy()->getScalarSizeInBits() <= 64) {
    if (Trunc->hasNoUnsignedWrap() || Trunc->hasNoSignedWrap()) {
      V = Trunc->getOperand(0);
      if (!Trunc->hasNoUnsignedWrap())
        Preconditions.emplace_back(CmpInst::ICMP_SGE, V,
                                   ConstantInt::get(V->getType(), 0));
    }
  }

    if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
      return *Decomp;
    return {V, IsKnownNonNegative};

    Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
                               ConstantInt::get(Op0->getType(), 0));
    Preconditions.emplace_back(CmpInst::ICMP_SGE, Op1,
                               ConstantInt::get(Op1->getType(), 0));
    if (auto Decomp = MergeResults(Op0, Op1, IsSigned))
      return *Decomp;
    return {V, IsKnownNonNegative};

    if (auto Decomp = MergeResults(Op0, CI, true))
      return *Decomp;
    return {V, IsKnownNonNegative};

    if (auto Decomp = MergeResults(Op0, CI, IsSigned))
      return *Decomp;
    return {V, IsKnownNonNegative};

    return {V, IsKnownNonNegative};

  auto Result = decompose(Op1, Preconditions, IsSigned, DL);
  return {V, IsKnownNonNegative};

  auto Result = decompose(Op1, Preconditions, IsSigned, DL);
  return {V, IsKnownNonNegative};

  auto ResA = decompose(Op0, Preconditions, IsSigned, DL);
  auto ResB = decompose(Op1, Preconditions, IsSigned, DL);
  return {V, IsKnownNonNegative};

  return {V, IsKnownNonNegative};
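// Illustrative sketch (not part of the pass): the MergeResults pattern above
// decomposes both operands and combines them, relying on add() returning true
// on 64-bit overflow, in which case the caller keeps the whole value as a
// single opaque {1, V} entry instead.
static std::optional<Decomposition> sketchMergeAdd(Decomposition A,
                                                   const Decomposition &B) {
  if (A.add(B))          // reports overflow of the combined constant offsets
    return std::nullopt; // caller falls back to treating V as one variable
  return A;
}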
ConstraintTy
ConstraintInfo::getConstraint(CmpPredicate Pred, Value *Op0, Value *Op1,
                              SmallVectorImpl<Value *> &NewVariables,
                              bool ForceSignedSystem) const {
  assert(NewVariables.empty() && "NewVariables must be empty when passed in");
  assert((!ForceSignedSystem || CmpInst::isEquality(Pred)) &&
         "signed system can only be forced on eq/ne");

  auto &Value2Index = getValue2Index(IsSigned);
  auto ADec = decompose(Op0->stripPointerCastsSameRepresentation(),
                        Preconditions, IsSigned, DL);
  auto BDec = decompose(Op1->stripPointerCastsSameRepresentation(),
                        Preconditions, IsSigned, DL);
  int64_t Offset1 = ADec.Offset;
  int64_t Offset2 = BDec.Offset;

  auto &VariablesA = ADec.Vars;
  auto &VariablesB = BDec.Vars;

  // First try to look up V in Value2Index and NewVariables, otherwise add a
  // new entry to NewVariables and NewIndexMap.
  SmallDenseMap<Value *, unsigned> NewIndexMap;
  auto GetOrAddIndex = [&Value2Index, &NewVariables,
                        &NewIndexMap](Value *V) -> unsigned {
    auto V2I = Value2Index.find(V);
    if (V2I != Value2Index.end())
      return V2I->second;
    auto Insert =
        NewIndexMap.insert({V, Value2Index.size() + NewVariables.size() + 1});
    if (Insert.second)
      NewVariables.push_back(V);
    return Insert.first->second;
  };

  // Make sure all variables have entries in Value2Index or NewVariables.
  for (const auto &KV : concat<DecompEntry>(VariablesA, VariablesB))
    GetOrAddIndex(KV.Variable);

  // Build the result constraint, adding coefficients from A and subtracting
  // coefficients from B.
  ConstraintTy Res(
      SmallVector<int64_t, 8>(Value2Index.size() + NewVariables.size() + 1, 0),
      IsSigned, IsEq, IsNe);
  // Collect variables that are known to be non-negative in VariablesA and
  // VariablesB.
  SmallDenseMap<Value *, bool> KnownNonNegativeVariables;
  auto &R = Res.Coefficients;
  for (const auto &KV : VariablesA) {
    R[GetOrAddIndex(KV.Variable)] += KV.Coefficient;
    auto I =
        KnownNonNegativeVariables.insert({KV.Variable, KV.IsKnownNonNegative});
    I.first->second &= KV.IsKnownNonNegative;
  }

  for (const auto &KV : VariablesB) {
    auto &Coeff = R[GetOrAddIndex(KV.Variable)];
    if (SubOverflow(Coeff, KV.Coefficient, Coeff))
      return {};
    auto I =
        KnownNonNegativeVariables.insert({KV.Variable, KV.IsKnownNonNegative});
    I.first->second &= KV.IsKnownNonNegative;
  }

  if (AddOverflow(OffsetSum, int64_t(-1), OffsetSum))
    return {};
  R[0] = OffsetSum;
  Res.Preconditions = std::move(Preconditions);

  // Remove any (Coefficient, Variable) entry where the coefficient is 0 for
  // new variables that would otherwise be added to the system.
  while (!NewVariables.empty()) {
    int64_t Last = R.back();
    if (Last != 0)
      break;
    R.pop_back();
    Value *RemovedV = NewVariables.pop_back_val();
    NewIndexMap.erase(RemovedV);
  }

  // Add extra rows for variables that are known to be non-negative.
  for (auto &KV : KnownNonNegativeVariables) {
    if (!KV.second ||
        (!Value2Index.contains(KV.first) && !NewIndexMap.contains(KV.first)))
      continue;
    auto &C = Res.ExtraInfo.emplace_back(
        Value2Index.size() + NewVariables.size() + 1, 0);
    C[GetOrAddIndex(KV.first)] = -1;
  }
  return Res;
}
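// Worked example (illustrative only, using the row layout built above): with
// Value2Index = {x -> 1, y -> 2}, the condition "x + 2 u<= y" decomposes into
// Offset1 = 2, VariablesA = {(1, x)} and Offset2 = 0, VariablesB = {(1, y)},
// which yields Coefficients = [c0, c_x, c_y] = [-2, 1, -1], i.e. the linear
// inequality x - y <= -2.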
  auto &Value2Index = getValue2Index(false);

  SmallVector<Value *> NewVariables;
  ConstraintTy R = getConstraint(Pred, Op0, Op1, NewVariables);
  if (!NewVariables.empty())
    return {};
  return R;
}

bool ConstraintTy::isValid(const ConstraintInfo &Info) const {
  return Coefficients.size() > 0 &&
         all_of(Preconditions, [&Info](const ConditionTy &C) {
           return Info.doesHold(C.Pred, C.Op0, C.Op1);
         });
}
std::optional<bool>
ConstraintTy::isImpliedBy(const ConstraintSystem &CS) const {
  bool IsConditionImplied = CS.isConditionImplied(Coefficients);

  bool IsNegatedOrEqualImplied =
      !NegatedOrEqual.empty() && CS.isConditionImplied(NegatedOrEqual);

  // For equality, both the <= and the >= direction must be implied.
  if (IsConditionImplied && IsNegatedOrEqualImplied)
    return IsEq;

  bool IsStrictLessThanImplied =
      !StrictLessThan.empty() && CS.isConditionImplied(StrictLessThan);

  // For non-equality, it suffices that either strict direction is implied.
  if (IsNegatedImplied || IsStrictLessThanImplied)
    return IsNe;

  if (IsConditionImplied)
    return true;

  if (IsNegatedImplied)
    return false;
bool ConstraintInfo::doesHold(CmpInst::Predicate Pred, Value *A,
                              Value *B) const {
  auto R = getConstraintForSolving(Pred, A, B);
  return R.isValid(*this) &&
         getCS(R.IsSigned).isConditionImplied(R.Coefficients);
}
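// Illustrative sketch (not part of the pass): isConditionImplied() can be
// understood as "the system together with the negated row has no solution".
// Negating a row R that encodes sum(c_i * x_i) <= c_0 flips the coefficients
// and tightens the bound: sum(-c_i * x_i) <= -c_0 - 1.
static SmallVector<int64_t, 8> sketchNegateRow(SmallVector<int64_t, 8> R) {
  for (unsigned I = 1; I < R.size(); ++I)
    R[I] = -R[I];
  R[0] = -R[0] - 1; // a real implementation must also guard against overflow
  return R;
}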
void ConstraintInfo::transferToOtherSystem(
    CmpPredicate Pred, Value *A, Value *B, unsigned NumIn, unsigned NumOut,
    SmallVectorImpl<StackEntry> &DFSInStack) {
  auto IsKnownNonNegative = [this](Value *V) {
    return doesHold(CmpInst::ICMP_SGE, V, ConstantInt::get(V->getType(), 0)) ||
           isKnownNonNegative(V, DL);
  };

  // Check if we can combine facts from the signed and unsigned systems to
  // derive additional facts.
  if (!A->getType()->isIntegerTy())
    return;

  // Depending on the predicate, re-add the fact to the other system when the
  // relevant operand is known to be non-negative:

  if (IsKnownNonNegative(B)) {

  if (IsKnownNonNegative(A)) {

  if (IsKnownNonNegative(A))

  if (IsKnownNonNegative(B))

  if (IsKnownNonNegative(B))
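// Self-contained check (illustrative, not part of the pass) of the kind of
// transfer performed above on concrete 64-bit values: if b is non-negative
// when read as signed and a <u b, then a is also non-negative and a <s b.
static bool sketchUnsignedToSignedTransferHolds(uint64_t A, uint64_t B) {
  bool BNonNeg = (int64_t)B >= 0;
  if (!(BNonNeg && A < B))
    return true; // premise not met, nothing to check
  return (int64_t)A >= 0 && (int64_t)A < (int64_t)B;
}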
void State::addInfoForInductions(BasicBlock &BB) {
  auto *L = LI.getLoopFor(&BB);
  if (!L || L->getHeader() != &BB)
    return;

  if (!L->contains(InLoopSucc) || !L->isLoopExiting(&BB) || InLoopSucc == &BB)
    return;

  if (!AR || AR->getLoop() != L || !LoopPred)
    return;

  const SCEV *StartSCEV = AR->getStart();
  Value *StartValue = nullptr;
  if (auto *C = dyn_cast<SCEVConstant>(StartSCEV))
    StartValue = C->getValue();
  assert(SE.getSCEV(StartValue) == StartSCEV && "inconsistent start value");

  bool MonotonicallyIncreasingUnsigned =
      IncUnsigned && *IncUnsigned == ScalarEvolution::MonotonicallyIncreasing;
  bool MonotonicallyIncreasingSigned =
      IncSigned && *IncSigned == ScalarEvolution::MonotonicallyIncreasing;

  if (MonotonicallyIncreasingUnsigned)

  if (MonotonicallyIncreasingSigned)

    StepOffset = C->getAPInt();

  if (!L->isLoopInvariant(B))
    return;

  if (!(-StepOffset).isOne())
    return;

  WorkList.push_back(FactOrCheck::getConditionFact(
  WorkList.push_back(FactOrCheck::getConditionFact(
  WorkList.push_back(FactOrCheck::getConditionFact(
  WorkList.push_back(FactOrCheck::getConditionFact(

  if (!StepOffset.isOne()) {

  if (!MonotonicallyIncreasingUnsigned)
    WorkList.push_back(FactOrCheck::getConditionFact(
  if (!MonotonicallyIncreasingSigned)
    WorkList.push_back(FactOrCheck::getConditionFact(

  WorkList.push_back(FactOrCheck::getConditionFact(
  WorkList.push_back(FactOrCheck::getConditionFact(

    llvm_unreachable("unsupported predicate");

  SmallVector<BasicBlock *> ExitBBs;
  L->getExitBlocks(ExitBBs);
  for (BasicBlock *EB : ExitBBs) {

static bool getConstraintFromMemoryAccess(GetElementPtrInst &GEP,
                                          uint64_t AccessSize,
                                          CmpPredicate &Pred, Value *&A,
                                          Value *&B, const DataLayout &DL,
                                          const TargetLibraryInfo &TLI) {
  if (!Offset.NW.hasNoUnsignedWrap())
    return false;
  if (Offset.VariableOffsets.size() != 1)
    return false;

  auto &[Index, Scale] = Offset.VariableOffsets.front();
  if (Index->getType()->getScalarSizeInBits() != BitWidth)
    return false;

  std::optional<TypeSize> Size =

  B = ConstantInt::get(Index->getType(), MaxIndex);
}
void State::addInfoFor(BasicBlock &BB) {
  addInfoForInductions(BB);

  bool GuaranteedToExecute = true;
  for (Instruction &I : BB) {

      for (Use &U : Cmp->uses()) {
        auto *UserI = getContextInstForUse(U);
        auto *DTN = DT.getNode(UserI->getParent());
        if (!DTN)
          continue;
        WorkList.push_back(FactOrCheck::getCheck(DTN, &U));
      }

    auto AddFactFromMemoryAccess = [&](Value *Ptr, Type *AccessType) {
      TypeSize AccessSize = DL.getTypeStoreSize(AccessType);
      if (GuaranteedToExecute) {
        if (getConstraintFromMemoryAccess(*GEP, AccessSize.getFixedValue(),
                                          Pred, A, B, DL, TLI)) {
          WorkList.push_back(
              FactOrCheck::getInstFact(DT.getNode(I.getParent()), &I));
        }
      }
    };

    if (auto *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isVolatile())
        AddFactFromMemoryAccess(LI->getPointerOperand(), LI->getAccessType());
    }
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isVolatile())
        AddFactFromMemoryAccess(SI->getPointerOperand(), SI->getAccessType());
    }

    case Intrinsic::assume: {
      if (GuaranteedToExecute) {
        WorkList.push_back(
            FactOrCheck::getInstFact(DT.getNode(I.getParent()), &I));
      }
      break;
    }
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::ucmp:
    case Intrinsic::scmp:
    case Intrinsic::umin:
    case Intrinsic::umax:
    case Intrinsic::smin:
    case Intrinsic::smax:
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat:
    case Intrinsic::abs:

    for (auto &Case : Switch->cases()) {
      BasicBlock *Succ = Case.getCaseSuccessor();
      Value *V = Case.getCaseValue();
      if (!canAddSuccessor(BB, Succ))
        continue;
    }
  }

  auto *Br = dyn_cast<BranchInst>(BB.getTerminator());
  if (!Br || !Br->isConditional())
    return;
  SmallPtrSet<Value *, 8> SeenCond;
  auto QueueValue = [&CondWorkList, &SeenCond](Value *V) {
    if (SeenCond.insert(V).second)
      CondWorkList.push_back(V);
  };

  while (!CondWorkList.empty()) {

        WorkList.emplace_back(FactOrCheck::getConditionFact(
            DTN, IsOr ? Cmp->getInverseCmpPredicate() : Cmp->getCmpPredicate(),
            Cmp->getOperand(0), Cmp->getOperand(1)));

  if (canAddSuccessor(BB, Br->getSuccessor(0)))
    WorkList.emplace_back(FactOrCheck::getConditionFact(
        DT.getNode(Br->getSuccessor(0)), CmpI->getCmpPredicate(),
        CmpI->getOperand(0), CmpI->getOperand(1)));
  if (canAddSuccessor(BB, Br->getSuccessor(1)))
    WorkList.emplace_back(FactOrCheck::getConditionFact(
        DT.getNode(Br->getSuccessor(1)), CmpI->getInverseCmpPredicate(),
        CmpI->getOperand(0), CmpI->getOperand(1)));
}
static void dumpUnpackedICmp(raw_ostream &OS, ICmpInst::Predicate Pred,
                             Value *LHS, Value *RHS) {
  OS << "icmp " << Pred << ' ';
  LHS->printAsOperand(OS, /*PrintType=*/true);
  OS << ", ";
  RHS->printAsOperand(OS, /*PrintType=*/false);
}

struct ReproducerEntry {
  ICmpInst::Predicate Pred;
  Value *LHS;
  Value *RHS;
  auto &Value2Index = Info.getValue2Index(IsSigned);

  while (!WorkList.empty()) {

    if (!Seen.insert(V).second)
      continue;
    if (Old2New.find(V) != Old2New.end())
      continue;

    if (Value2Index.contains(V) || !I ||

  for (auto &Entry : Stack)

  for (auto *P : Args)

      Cond->getModule()->getName() + Cond->getFunction()->getName() + "repro",

  for (unsigned I = 0; I < Args.size(); ++I) {
    Old2New[Args[I]] = F->getArg(I);
  }

  Builder.CreateRet(Builder.getTrue());
  Builder.SetInsertPoint(Entry->getTerminator());

  auto &Value2Index = Info.getValue2Index(IsSigned);
  while (!WorkList.empty()) {
    if (Old2New.find(V) != Old2New.end())
      continue;

    if (!Value2Index.contains(V) && I) {
      Old2New[V] = nullptr;

    Old2New[I] = Cloned;
    Old2New[I]->setName(I->getName());

  for (auto &Entry : Stack) {

    auto *Cmp = Builder.CreateICmp(Entry.Pred, Entry.LHS, Entry.RHS);
    Builder.CreateAssumption(Cmp);

  Entry->getTerminator()->setOperand(0, Cond);
static std::optional<bool> checkCondition(CmpInst::Predicate Pred, Value *A,
                                          Value *B, Instruction *CheckInst,
                                          ConstraintInfo &Info) {
  auto R = Info.getConstraintForSolving(Pred, A, B);
  if (R.empty() || !R.isValid(Info)) {
    return std::nullopt;
  }

  auto &CSToUse = Info.getCS(R.IsSigned);

  // If there was extra information collected during decomposition, apply it
  // now and remove it once we are done reasoning about the constraint.
  for (auto &Row : R.ExtraInfo)
    CSToUse.addVariableRow(Row);
  auto InfoRestorer = make_scope_exit([&]() {
    for (unsigned I = 0; I < R.ExtraInfo.size(); ++I)
      CSToUse.popLastConstraint();
  });

  if (auto ImpliedCondition = R.isImpliedBy(CSToUse)) {
    if (!DebugCounter::shouldExecute(EliminatedCounter))
      return std::nullopt;

    LLVM_DEBUG({
      dbgs() << "Condition ";
      dumpUnpackedICmp(dbgs(), Pred, A, B);
      dbgs() << " implied by dominating constraints\n";
      CSToUse.dump();
    });
    return ImpliedCondition;
  }

  return std::nullopt;
}
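// Illustrative sketch (not part of the pass) of the add-query-pop pattern used
// above: extra rows are pushed onto the system only for the duration of a
// single implication query and popped again, leaving the system unchanged.
static bool sketchImpliedWithExtraInfo(ConstraintSystem &CS,
                                       ArrayRef<SmallVector<int64_t, 8>> Extra,
                                       const SmallVector<int64_t, 8> &Query) {
  for (const auto &Row : Extra)
    CS.addVariableRow(Row);
  bool Implied = CS.isConditionImplied(Query);
  for (unsigned I = 0; I < Extra.size(); ++I)
    CS.popLastConstraint();
  return Implied;
}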
static bool checkAndReplaceCondition(
    ICmpInst *Cmp, ConstraintInfo &Info, unsigned NumIn, unsigned NumOut,
    Instruction *ContextInst, Module *ReproducerModule,
    ArrayRef<ReproducerEntry> ReproducerCondStack, DominatorTree &DT,
    SmallVectorImpl<Instruction *> &ToRemove) {
  auto ReplaceCmpWithConstant = [&](CmpInst *Cmp, bool IsTrue) {
    Value *ConstantC = ConstantInt::getBool(
        CmpInst::makeCmpResultType(Cmp->getType()), IsTrue);
    Cmp->replaceUsesWithIf(ConstantC, [&DT, NumIn, NumOut,
                                       ContextInst](Use &U) {
      auto *UserI = getContextInstForUse(U);
      auto *DTN = DT.getNode(UserI->getParent());
      if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut)
        return false;
      if (UserI->getParent() == ContextInst->getParent() &&
          UserI->comesBefore(ContextInst))
        return false;

      // Conditions in an assume trivially simplify to true. Skip uses in
      // assume calls to not destroy the available information.
      auto *II = dyn_cast<IntrinsicInst>(UserI);
      bool ShouldReplace = !II || II->getIntrinsicID() != Intrinsic::assume;
      NumCondsRemoved += ShouldReplace;
      return ShouldReplace;
    });

    // Update debug value records under the same dominance conditions.
    for (auto *DVR : DVRUsers) {
      auto *DTN = DT.getNode(DVR->getParent());
      if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut)
        continue;

      auto *MarkedI = DVR->getInstruction();
      if (MarkedI->getParent() == ContextInst->getParent() &&
          MarkedI->comesBefore(ContextInst))
        continue;

      DVR->replaceVariableLocationOp(Cmp, ConstantC);
    }

    if (Cmp->use_empty())
      ToRemove.push_back(Cmp);
    return true;
  };

  if (auto ImpliedCondition =
          checkCondition(Cmp->getPredicate(), Cmp->getOperand(0),
                         Cmp->getOperand(1), Cmp, Info))
    return ReplaceCmpWithConstant(Cmp, *ImpliedCondition);

  // For samesign unsigned predicates, also try the signed form.
  if (Cmp->hasSameSign() && Cmp->isUnsigned())
    if (auto ImpliedCondition =
            checkCondition(Cmp->getSignedPredicate(), Cmp->getOperand(0),
                           Cmp->getOperand(1), Cmp, Info))
      return ReplaceCmpWithConstant(Cmp, *ImpliedCondition);

  return false;
}

    MinMax->replaceAllUsesWith(MinMax->getOperand(UseLHS ? 0 : 1));

    return ReplaceMinMaxWithOperand(MinMax, *ImpliedCondition);
    return ReplaceMinMaxWithOperand(MinMax, !*ImpliedCondition);

    I->replaceAllUsesWith(ConstantInt::get(I->getType(), 1));

    I->replaceAllUsesWith(ConstantInt::get(I->getType(), 0));
static void removeEntryFromStack(const StackEntry &E, ConstraintInfo &Info,
                                 Module *ReproducerModule,
                                 SmallVectorImpl<ReproducerEntry> &ReproducerCondStack,
                                 SmallVectorImpl<StackEntry> &DFSInStack) {
  Info.popLastConstraint(E.IsSigned);
  // Remove variables in the system that went out of scope.
  auto &Mapping = Info.getValue2Index(E.IsSigned);
  for (Value *V : E.ValuesToRelease)
    Mapping.erase(V);
  Info.popLastNVariables(E.IsSigned, E.ValuesToRelease.size());
  DFSInStack.pop_back();
  if (ReproducerModule)
    ReproducerCondStack.pop_back();
}
static bool checkOrAndOpImpliedByOther(
    FactOrCheck &CB, ConstraintInfo &Info, Module *ReproducerModule,
    SmallVectorImpl<ReproducerEntry> &ReproducerCondStack,
    SmallVectorImpl<StackEntry> &DFSInStack,
    SmallVectorImpl<Instruction *> &ToRemove) {

  unsigned OtherOpIdx = JoinOp->getOperand(0) == CmpToCheck ? 1 : 0;

  unsigned OldSize = DFSInStack.size();

  // Remove the entries added for the other operand again.
  while (OldSize < DFSInStack.size()) {
    StackEntry E = DFSInStack.back();
    removeEntryFromStack(E, Info, ReproducerModule, ReproducerCondStack,
                         DFSInStack);
  }

  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();

      Info.addFact(Pred, LHS, RHS, CB.NumIn, CB.NumOut, DFSInStack);

      Worklist.push_back(LHS);
      Worklist.push_back(RHS);
  }

  if (OldSize == DFSInStack.size())
    return false;

  if (auto ImpliedCondition =

    if (IsOr == *ImpliedCondition)
void ConstraintInfo::addFact(CmpPredicate Pred, Value *A, Value *B,
                             unsigned NumIn, unsigned NumOut,
                             SmallVectorImpl<StackEntry> &DFSInStack) {
  addFactImpl(Pred, A, B, NumIn, NumOut, DFSInStack, false);
  // For eq/ne predicates, also add the fact to the signed system.
  if (CmpInst::isEquality(Pred))
    addFactImpl(Pred, A, B, NumIn, NumOut, DFSInStack, true);
}

void ConstraintInfo::addFactImpl(CmpPredicate Pred, Value *A, Value *B,
                                 unsigned NumIn, unsigned NumOut,
                                 SmallVectorImpl<StackEntry> &DFSInStack,
                                 bool ForceSignedSystem) {
  SmallVector<Value *> NewVariables;
  auto R = getConstraint(Pred, A, B, NewVariables, ForceSignedSystem);

  if (!R.isValid(*this) || R.isNe())
    return;

  auto &CSToUse = getCS(R.IsSigned);
  if (R.Coefficients.empty())
    return;

  bool Added = CSToUse.addVariableRowFill(R.Coefficients);
  if (!Added)
    return;

  // If R has been added to the system, record the new variables and queue the
  // entry up to be removed once it goes out of scope.
  SmallVector<Value *, 2> ValuesToRelease;
  auto &Value2Index = getValue2Index(R.IsSigned);
  for (Value *V : NewVariables) {
    Value2Index.insert({V, Value2Index.size() + 1});
    ValuesToRelease.push_back(V);
  }

  LLVM_DEBUG({
    dbgs() << "  constraint: ";
    dumpConstraint(R.Coefficients, getValue2Index(R.IsSigned));
    dbgs() << "\n";
  });

  DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned,
                          std::move(ValuesToRelease));

  for (Value *V : NewVariables) {
    ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0),
                        false, false, false);
    VarPos.Coefficients[Value2Index[V]] = -1;
    CSToUse.addVariableRow(VarPos.Coefficients);
    DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned,
                            SmallVector<Value *, 2>());
  }

  if (R.isEq()) {
    // Also add the inverted constraint for equality constraints.
    for (auto &Coeff : R.Coefficients)
      Coeff *= -1;
    CSToUse.addVariableRowFill(R.Coefficients);
    DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned,
                            SmallVector<Value *, 2>());
  }
}
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B,
                                   SmallVectorImpl<Instruction *> &ToRemove) {
      Sub = Builder.CreateSub(A, B);
    U->replaceAllUsesWith(Sub);

      U->replaceAllUsesWith(Builder.getFalse());

    if (U->use_empty()) {

  if (II->use_empty()) {
    II->eraseFromParent();
static bool
tryToSimplifyOverflowMath(IntrinsicInst *II, ConstraintInfo &Info,
                          SmallVectorImpl<Instruction *> &ToRemove) {
  auto DoesConditionHold = [](CmpInst::Predicate Pred, Value *A, Value *B,
                              ConstraintInfo &Info) {
    auto R = Info.getConstraintForSolving(Pred, A, B);
    if (R.size() < 2 || !R.isValid(Info))
      return false;

    auto &CSToUse = Info.getCS(R.IsSigned);
    return CSToUse.isConditionImplied(R.Coefficients);
  };

  bool Changed = false;
  if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) {
    // If A s>= B && B s>= 0, ssub.with.overflow(A, B) cannot overflow and can
    // be simplified to a regular sub.
    Value *A = II->getOperand(0);
    Value *B = II->getOperand(1);
    if (!DoesConditionHold(CmpInst::ICMP_SGE, A, B, Info) ||
        !DoesConditionHold(CmpInst::ICMP_SGE, B,
                           ConstantInt::get(A->getType(), 0), Info))
      return false;
    Changed = replaceSubOverflowUses(II, A, B, ToRemove);
  }
  return Changed;
}
  ConstraintInfo Info(F.getDataLayout(), FunctionArgs);
  State S(DT, LI, SE, TLI);
  std::unique_ptr<Module> ReproducerModule(
      DumpReproducers ? new Module(F.getName(), F.getContext()) : nullptr);

  // Sort the worklist.
  stable_sort(S.WorkList, [](const FactOrCheck &A, const FactOrCheck &B) {
    auto HasNoConstOp = [](const FactOrCheck &B) {
      Value *V0 = B.isConditionFact() ? B.Cond.Op0 : B.Inst->getOperand(0);
      Value *V1 = B.isConditionFact() ? B.Cond.Op1 : B.Inst->getOperand(1);
      return !isa<ConstantInt>(V0) && !isa<ConstantInt>(V1);
    };
    // Within the same block, conditions are added before checks, and facts
    // with a constant operand come before facts without one.
    if (A.NumIn == B.NumIn) {
      if (A.isConditionFact() && B.isConditionFact()) {
        bool NoConstOpA = HasNoConstOp(A);
        bool NoConstOpB = HasNoConstOp(B);
        return NoConstOpA < NoConstOpB;
      }
      if (A.isConditionFact())
        return true;
      if (B.isConditionFact())
        return false;
      auto *InstA = A.getContextInst();
      auto *InstB = B.getContextInst();
      return InstA->comesBefore(InstB);
    }
    return A.NumIn < B.NumIn;
  });
  for (FactOrCheck &CB : S.WorkList) {
    // First, pop stack entries that are out of scope for CB.
    while (!DFSInStack.empty()) {
      auto &E = DFSInStack.back();
      LLVM_DEBUG(dbgs() << "CB: " << CB.NumIn << " " << CB.NumOut << "\n");
      if (CB.NumOut <= E.NumOut)
        break;
      LLVM_DEBUG({
        dbgs() << "Removing ";
        dumpConstraint(Info.getCS(E.IsSigned).getLastConstraint(),
                       Info.getValue2Index(E.IsSigned));
        dbgs() << "\n";
      });
      removeEntryFromStack(E, Info, ReproducerModule.get(),
                           ReproducerCondStack, DFSInStack);
    }
    if (CB.isCheck()) {
      Instruction *Inst = CB.getInstructionToSimplify();
      if (!Inst)
        continue;
      LLVM_DEBUG(dbgs() << "Processing condition to simplify: " << *Inst
                        << "\n");
      if (auto *Cmp = dyn_cast<ICmpInst>(Inst)) {
        bool Simplified = checkAndReplaceCondition(
            Cmp, Info, CB.NumIn, CB.NumOut, CB.getContextInst(),
            ReproducerModule.get(), ReproducerCondStack, S.DT, ToRemove);
        if (!Simplified)
          Simplified = checkOrAndOpImpliedByOther(
              CB, Info, ReproducerModule.get(), ReproducerCondStack,
              DFSInStack, ToRemove);
        Changed |= Simplified;
      }
      continue;
    }

    auto AddFact = [&](CmpPredicate Pred, Value *A, Value *B) {
      if (Info.getCS(CmpInst::isSigned(Pred)).size() > MaxRows) {
        LLVM_DEBUG(
            dbgs()
            << "Skip adding constraint because system has too many rows.\n");
        return;
      }

      Info.addFact(Pred, A, B, CB.NumIn, CB.NumOut, DFSInStack);
      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size())
        ReproducerCondStack.emplace_back(Pred, A, B);

      Info.transferToOtherSystem(Pred, A, B, CB.NumIn, CB.NumOut,
                                 DFSInStack);

      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size()) {
        // Keep the reproducer stack in sync with DFSInStack by adding dummy
        // entries for facts that do not map back to a single icmp.
        for (unsigned I = 0,
                      E = (DFSInStack.size() - ReproducerCondStack.size());
             I < E; ++I) {
          ReproducerCondStack.emplace_back(ICmpInst::BAD_ICMP_PREDICATE,
                                           nullptr, nullptr);
        }
      }
    };
    if (!CB.isConditionFact()) {
      if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(CB.Inst)) {
        Pred = ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
        AddFact(Pred, MinMax, MinMax->getLHS());
        AddFact(Pred, MinMax, MinMax->getRHS());
        continue;
      }

      if (auto *USatI = dyn_cast<SaturatingInst>(CB.Inst)) {
        switch (USatI->getIntrinsicID()) {
        case Intrinsic::uadd_sat:
          AddFact(ICmpInst::ICMP_UGE, USatI, USatI->getLHS());
          AddFact(ICmpInst::ICMP_UGE, USatI, USatI->getRHS());
          break;
        case Intrinsic::usub_sat:
          AddFact(ICmpInst::ICMP_ULE, USatI, USatI->getLHS());
          break;
        }
        continue;
      }

      auto &DL = F.getDataLayout();
      auto AddFactsAboutIndices = [&](Value *Ptr, Type *AccessType) {
        if (getConstraintFromMemoryAccess(
                *GEP, DL.getTypeStoreSize(AccessType).getFixedValue(), Pred, A,
                B, DL, TLI))
          AddFact(Pred, A, B);
      };

      if (auto *LI = dyn_cast<LoadInst>(CB.Inst))
        AddFactsAboutIndices(LI->getPointerOperand(), LI->getAccessType());
      if (auto *SI = dyn_cast<StoreInst>(CB.Inst))
        AddFactsAboutIndices(SI->getPointerOperand(), SI->getAccessType());
    Value *A = nullptr, *B = nullptr;
    if (CB.isConditionFact()) {
      Pred = CB.Cond.Pred;
      A = CB.Cond.Op0;
      B = CB.Cond.Op1;
      if (CB.DoesHold.Pred != CmpInst::BAD_ICMP_PREDICATE &&
          !Info.doesHold(CB.DoesHold.Pred, CB.DoesHold.Op0, CB.DoesHold.Op1)) {
        LLVM_DEBUG({
          dbgs() << "Not adding fact ";
          dumpUnpackedICmp(dbgs(), Pred, A, B);
          dbgs() << " because precondition ";
          dumpUnpackedICmp(dbgs(), CB.DoesHold.Pred, CB.DoesHold.Op0,
                           CB.DoesHold.Op1);
          dbgs() << " does not hold.\n";
        });
        continue;
      }
    } else {
      bool Matched = match(CB.Inst, m_Intrinsic<Intrinsic::assume>(
                                        m_ICmp(Pred, m_Value(A), m_Value(B))));
      (void)Matched;
      assert(Matched && "Must have an assume intrinsic with a icmp operand");
    }
    AddFact(Pred, A, B);
  if (ReproducerModule && !ReproducerModule->functions().empty()) {
    std::string S;
    raw_string_ostream StringS(S);
    ReproducerModule->print(StringS, nullptr);
    OptimizationRemark Rem(DEBUG_TYPE, "Reproducer", &F);
    Rem << ore::NV("module") << S;
    ORE.emit(Rem);
  }
  unsigned SignedEntries =
      count_if(DFSInStack, [](const StackEntry &E) { return E.IsSigned; });
  assert(Info.getCS(false).size() - FunctionArgs.size() ==
             DFSInStack.size() - SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
  assert(Info.getCS(true).size() == SignedEntries &&
         "updates to CS and DFSInStack are out of sync");

  for (Instruction *I : ToRemove)
    I->eraseFromParent();
  return Changed;
}
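// Illustrative sketch (not part of the pass): instructions that become dead
// during the walk are only collected in ToRemove and erased after the
// traversal, so no iterator over blocks or uses is invalidated mid-walk.
static void sketchFlushRemovals(SmallVectorImpl<Instruction *> &ToRemove) {
  for (Instruction *I : ToRemove)
    I->eraseFromParent();
  ToRemove.clear();
}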
static int64_t MaxConstraintValue
static int64_t MinSignedConstraintValue
static Instruction * getContextInstForUse(Use &U)
static Decomposition decomposeGEP(GEPOperator &GEP, SmallVectorImpl< ConditionTy > &Preconditions, bool IsSigned, const DataLayout &DL)
static bool canUseSExt(ConstantInt *CI)
static void dumpConstraint(ArrayRef< int64_t > C, const DenseMap< Value *, unsigned > &Value2Index)
static void removeEntryFromStack(const StackEntry &E, ConstraintInfo &Info, Module *ReproducerModule, SmallVectorImpl< ReproducerEntry > &ReproducerCondStack, SmallVectorImpl< StackEntry > &DFSInStack)
static std::optional< bool > checkCondition(CmpInst::Predicate Pred, Value *A, Value *B, Instruction *CheckInst, ConstraintInfo &Info)
static cl::opt< unsigned > MaxRows("constraint-elimination-max-rows", cl::init(500), cl::Hidden, cl::desc("Maximum number of rows to keep in constraint system"))
static cl::opt< bool > DumpReproducers("constraint-elimination-dump-reproducers", cl::init(false), cl::Hidden, cl::desc("Dump IR to reproduce successful transformations."))
static bool checkOrAndOpImpliedByOther(FactOrCheck &CB, ConstraintInfo &Info, Module *ReproducerModule, SmallVectorImpl< ReproducerEntry > &ReproducerCondStack, SmallVectorImpl< StackEntry > &DFSInStack, SmallVectorImpl< Instruction * > &ToRemove)
Check if either the first condition of an AND or OR is implied by the (negated in case of OR) second ...
static bool eliminateConstraints(Function &F, DominatorTree &DT, LoopInfo &LI, ScalarEvolution &SE, OptimizationRemarkEmitter &ORE, TargetLibraryInfo &TLI)
static OffsetResult collectOffsets(GEPOperator &GEP, const DataLayout &DL)
static bool checkAndReplaceMinMax(MinMaxIntrinsic *MinMax, ConstraintInfo &Info, SmallVectorImpl< Instruction * > &ToRemove)
static void generateReproducer(CmpInst *Cond, Module *M, ArrayRef< ReproducerEntry > Stack, ConstraintInfo &Info, DominatorTree &DT)
Helper function to generate a reproducer function for simplifying Cond.
static bool checkAndReplaceCondition(ICmpInst *Cmp, ConstraintInfo &Info, unsigned NumIn, unsigned NumOut, Instruction *ContextInst, Module *ReproducerModule, ArrayRef< ReproducerEntry > ReproducerCondStack, DominatorTree &DT, SmallVectorImpl< Instruction * > &ToRemove)
static bool getConstraintFromMemoryAccess(GetElementPtrInst &GEP, uint64_t AccessSize, CmpPredicate &Pred, Value *&A, Value *&B, const DataLayout &DL, const TargetLibraryInfo &TLI)
static void dumpUnpackedICmp(raw_ostream &OS, ICmpInst::Predicate Pred, Value *LHS, Value *RHS)
static Decomposition decompose(Value *V, SmallVectorImpl< ConditionTy > &Preconditions, bool IsSigned, const DataLayout &DL)
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B, SmallVectorImpl< Instruction * > &ToRemove)
static bool tryToSimplifyOverflowMath(IntrinsicInst *II, ConstraintInfo &Info, SmallVectorImpl< Instruction * > &ToRemove)
static bool checkAndReplaceCmp(CmpIntrinsic *I, ConstraintInfo &Info, SmallVectorImpl< Instruction * > &ToRemove)