53 #define DEBUG_TYPE "instsimplify"
97 CmpInst *Cmp = dyn_cast<CmpInst>(V);
101 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
102 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
115 unsigned MaxRecurse, Constant *TrueOrFalse) {
117 if (SimplifiedCmp == Cond) {
125 return SimplifiedCmp;
131 unsigned MaxRecurse) {
139 unsigned MaxRecurse) {
149 unsigned MaxRecurse) {
186 if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
199 auto *B = dyn_cast<BinaryOperator>(V);
200 if (!B || B->getOpcode() != OpcodeToExpand)
202 Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
213 if ((L == B0 && R == B1) ||
234 unsigned MaxRecurse) {
251 unsigned MaxRecurse) {
354 unsigned MaxRecurse) {
360 if (isa<SelectInst>(LHS)) {
361 SI = cast<SelectInst>(LHS);
363 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
364 SI = cast<SelectInst>(RHS);
391 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
397 if ((FV && !TV) || (TV && !FV)) {
400 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
401 if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
402 !Simplified->hasPoisonGeneratingFlags()) {
406 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
407 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
408 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
409 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
410 Simplified->getOperand(1) == UnsimplifiedRHS)
412 if (Simplified->isCommutative() &&
413 Simplified->getOperand(1) == UnsimplifiedLHS &&
414 Simplified->getOperand(0) == UnsimplifiedRHS)
438 if (!isa<SelectInst>(LHS)) {
442 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
445 Value *TV = SI->getTrueValue();
446 Value *FV = SI->getFalseValue();
478 unsigned MaxRecurse) {
484 if (isa<PHINode>(LHS)) {
485 PI = cast<PHINode>(LHS);
490 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
491 PI = cast<PHINode>(RHS);
498 Value *CommonValue = nullptr;
511 if (!V || (CommonValue && V != CommonValue))
530 if (!isa<PHINode>(LHS)) {
534 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
542 Value *CommonValue = nullptr;
556 if (!V || (CommonValue && V != CommonValue))
567 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
568 if (auto *CRHS = dyn_cast<Constant>(Op1)) {
572 case Instruction::FAdd:
573 case Instruction::FSub:
574 case Instruction::FMul:
575 case Instruction::FDiv:
576 case Instruction::FRem:
577 if (Q.CxtI != nullptr)
598 if (isa<PoisonValue>(Op1))
661 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
674 bool AllowNonInbounds = false) {
675 assert(V->getType()->isPtrOrPtrVectorTy());
678 V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
681 return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
701 if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
716 std::optional<bool> Imp =
721 case Instruction::Sub:
722 case Instruction::Xor:
723 case Instruction::URem:
724 case Instruction::SRem:
727 case Instruction::SDiv:
728 case Instruction::UDiv:
729 return ConstantInt::get(Ty, 1);
731 case Instruction::And:
732 case Instruction::Or:
751 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
787 Value *X = nullptr, *Y = nullptr, *Z = Op1;
845 if (X->getType() == Y->getType())
890 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
901 if (isa<PoisonValue>(Op1))
925 return ConstantInt::getNullValue(Op0->getType());
940 Instruction::Add, Q, MaxRecurse))
945 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
952 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
962 return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
971 Constant *C = dyn_cast_or_null<Constant>(V);
972 return (C && C->isAllOnesValue());
978 unsigned MaxRecurse, bool IsSigned) {
995 Type *Ty = X->getType();
1001 Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1002 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1011 if (C->isMinSignedValue())
1017 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1018 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1037 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1044 unsigned MaxRecurse) {
1045 bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1046 bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1063 if (isa<PoisonValue>(Op0))
1103 auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1114 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1122 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1128 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1138 unsigned MaxRecurse) {
1161 (Opcode == Instruction::UDiv
1181 if ((Opcode == Instruction::SRem &&
1183 (Opcode == Instruction::URem &&
1191 if (Opcode == Instruction::SRem
1194 return C.srem(*C0).isZero();
1198 return C.urem(*C0).isZero();
1214 return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1226 return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1237 unsigned MaxRecurse) {
1242 return ConstantInt::getNullValue(Op0->getType());
1246 return ConstantInt::getNullValue(Op0->getType());
1248 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1258 unsigned MaxRecurse) {
1259 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1268 Constant *C = dyn_cast<Constant>(Amount);
1278 const APInt *AmountC;
1284 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1285 for (unsigned I = 0,
1286 E = cast<FixedVectorType>(C->getType())->getNumElements();
1300 unsigned MaxRecurse) {
1305 if (isa<PoisonValue>(Op0))
1326 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1332 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1350 assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1369 Value *Op1, bool IsExact,
1388 if (Op0Known.One[0])
1400 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1424 if (IsNSW && IsNUW &&
1433 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
1455 const APInt *ShRAmt, *ShLAmt;
1458 *ShRAmt == *ShLAmt) {
1461 if (ShRAmt->uge(EffWidthY))
1509 ICmpInst *UnsignedICmp, bool IsAnd,
1523 if (match(UnsignedICmp,
1525 ICmpInst::isUnsigned(UnsignedPred)) {
1527 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1528 UnsignedPred == ICmpInst::ICMP_ULE) &&
1529 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1532 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1533 UnsignedPred == ICmpInst::ICMP_UGT) &&
1534 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1539 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1540 UnsignedPred == ICmpInst::ICMP_UGT))
1541 return IsAnd ? UnsignedICmp : ZeroICmp;
1545 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1546 UnsignedPred == ICmpInst::ICMP_UGE))
1547 return IsAnd ? ZeroICmp : UnsignedICmp;
1553 if (match(UnsignedICmp,
1555 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1557 return UnsignedICmp;
1558 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1560 return UnsignedICmp;
1565 ICmpInst::isUnsigned(UnsignedPred))
1567 else if (match(UnsignedICmp,
1569 ICmpInst::isUnsigned(UnsignedPred))
1570 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1576 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1578 return IsAnd ? ZeroICmp : UnsignedICmp;
1582 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1584 return IsAnd ? UnsignedICmp : ZeroICmp;
1593 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1594 return IsAnd ? UnsignedICmp : ZeroICmp;
1598 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1599 return IsAnd ? ZeroICmp : UnsignedICmp;
1602 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1607 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1623 const APInt *C0, *C1;
1633 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1638 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1646 if (Range0.contains(Range1))
1647 return IsAnd ? Cmp1 : Cmp0;
1648 if (Range1.contains(Range0))
1649 return IsAnd ? Cmp0 : Cmp1;
1658 const APInt *C0, *C1;
1666 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1667 if (AddInst->getOperand(1) != Op1->getOperand(1))
1674 const APInt Delta = *C1 - *C0;
1677 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1679 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1683 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1685 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1691 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1694 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1713 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1716 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1749 const APInt *C0, *C1;
1757 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1758 if (AddInst->getOperand(1) != Op1->getOperand(1))
1765 const APInt Delta = *C1 - *C0;
1768 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1770 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1774 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1776 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1782 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1785 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1817 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1818 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1824 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1825 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1826 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1831 if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
1833 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1839 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1840 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1841 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1846 if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
1848 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1857 Value *Op1, bool IsAnd) {
1859 auto *Cast0 = dyn_cast<CastInst>(Op0);
1860 auto *Cast1 = dyn_cast<CastInst>(Op1);
1861 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1862 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1863 Op0 = Cast0->getOperand(0);
1864 Op1 = Cast1->getOperand(0);
1868 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1869 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1874 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1875 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1886 if (auto *C = dyn_cast<Constant>(V))
1895 bool AllowRefinement,
1897 unsigned MaxRecurse);
1901 unsigned MaxRecurse) {
1902 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1917 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1918 if (Res == Absorber)
1928 if (Res == Absorber)
1938 nullptr, MaxRecurse))
1939 return Simplify(Res);
1942 nullptr, MaxRecurse))
1943 return Simplify(Res);
1953 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1965 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1966 : ConstantInt::getAllOnesValue(Ty);
1975 unsigned MaxRecurse) {
2009 const APInt *Shift1, *Shift2;
2013 Shift1->uge(*Shift2))
2026 unsigned MaxRecurse) {
2031 if (isa<PoisonValue>(Op1))
2066 (~(*Mask)).lshr(*ShAmt).isZero())
2072 (~(*Mask)).shl(*ShAmt).isZero())
2077 const APInt *PowerC;
2086 return ConstantInt::getNullValue(Op1->getType());
2099 Instruction::Or, Q, MaxRecurse))
2104 Instruction::Xor, Q, MaxRecurse))
2107 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2125 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2149 if (EffWidthY <= ShftCnt) {
2182 if (*Implied == true)
2185 if (*Implied == false)
2210 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2211 Type *Ty = X->getType();
2215 return ConstantInt::getAllOnesValue(Ty);
2219 return ConstantInt::getAllOnesValue(Ty);
2237 return ConstantInt::getAllOnesValue(Ty);
2261 return ConstantInt::getAllOnesValue(Ty);
2301 unsigned MaxRecurse) {
2306 if (isa<PoisonValue>(Op1))
C->ule(X->getType()->getScalarSizeInBits())) {
2341 return ConstantInt::getAllOnesValue(X->getType());
2395 Instruction::And, Q, MaxRecurse))
2398 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2416 const APInt *C1, *C2;
2442 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2452 if (std::optional<bool> Implied =
2455 if (*Implied == false)
2458 if (*Implied == true)
2461 if (std::optional<bool> Implied =
2464 if (*Implied == false)
2467 if (*Implied == true)
2485 unsigned MaxRecurse) {
2490 if (isa<PoisonValue>(Op1))
2527 if (Value *R = foldAndOrNot(Op0, Op1))
2529 if (Value *R = foldAndOrNot(Op1, Op0))
2579 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2582 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2583 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2586 LHS == CmpRHS && RHS == CmpLHS)
2599 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2600 return AI->isStaticAlloca();
2601 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2602 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2603 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2604 !GV->isThreadLocal();
2605 if (const Argument *A = dyn_cast<Argument>(V))
2606 return A->hasByValAttr();
2639 auto isByValArg = [](const Value *V) {
2640 const Argument *A = dyn_cast<Argument>(V);
2641 return A && A->hasByValAttr();
2647 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2649 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2651 return isa<AllocaInst>(V1) &&
2652 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2708 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2709 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2729 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2731 if (auto *I = dyn_cast<Instruction>(V))
2732 return I->getFunction();
2733 if (auto *A = dyn_cast<Argument>(V))
2734 return A->getParent();
2740 APInt Dist = LHSOffset - RHSOffset;
2768 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2769 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2789 bool Captured = false;
2793 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2797 unsigned OtherIdx = 1 - U->getOperandNo();
2798 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2799 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2807 CustomCaptureTracker Tracker;
2809 if (!Tracker.Captured)
2831 auto ExtractNotLHS = [](Value *V) -> Value * {
2893 case ICmpInst::ICMP_UGE:
2897 case ICmpInst::ICMP_SGE:
2908 case ICmpInst::ICMP_ULE:
2912 case ICmpInst::ICMP_SLE:
2932 case ICmpInst::ICMP_ULT:
2934 case ICmpInst::ICMP_UGE:
2936 case ICmpInst::ICMP_EQ:
2937 case ICmpInst::ICMP_ULE:
2941 case ICmpInst::ICMP_NE:
2942 case ICmpInst::ICMP_UGT:
2946 case ICmpInst::ICMP_SLT: {
2954 case ICmpInst::ICMP_SLE: {
2962 case ICmpInst::ICMP_SGE: {
2970 case ICmpInst::ICMP_SGT: {
3023 *MulC != 0 && C->urem(*MulC) != 0) ||
3025 *MulC != 0 && C->srem(*MulC) != 0)))
3026 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3040 unsigned Depth = 0) {
3041 if (!Res.insert(V).second)
3048 auto *I = dyn_cast<Instruction>(V);
3068 switch (I->getOpcode()) {
3069 case Instruction::And:
3073 case Instruction::URem:
3074 case Instruction::UDiv:
3075 case Instruction::LShr:
3078 case Instruction::Call:
3091 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3100 for (Value *GV : GreaterValues)
3103 Pred == ICmpInst::ICMP_UGE);
3109 unsigned MaxRecurse) {
3115 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3130 case ICmpInst::ICMP_SGT:
3131 case ICmpInst::ICMP_SGE: {
3137 case ICmpInst::ICMP_EQ:
3138 case ICmpInst::ICMP_UGT:
3139 case ICmpInst::ICMP_UGE:
3141 case ICmpInst::ICMP_SLT:
3142 case ICmpInst::ICMP_SLE: {
3148 case ICmpInst::ICMP_NE:
3149 case ICmpInst::ICMP_ULT:
3150 case ICmpInst::ICMP_ULE:
3172 case ICmpInst::ICMP_EQ:
3173 case ICmpInst::ICMP_UGE:
3174 case ICmpInst::ICMP_UGT:
3176 case ICmpInst::ICMP_NE:
3177 case ICmpInst::ICMP_ULT:
3178 case ICmpInst::ICMP_ULE:
3193 const APInt *C1, *C2;
3200 if (Pred == ICmpInst::ICMP_UGT)
3202 if (Pred == ICmpInst::ICMP_ULE)
3240 const APInt *C1, *C2;
3254 unsigned MaxRecurse) {
3257 if (MaxRecurse && (LBO || RBO)) {
3259 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3261 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3262 if (LBO && LBO->getOpcode() == Instruction::Add) {
3272 if (RBO && RBO->getOpcode() == Instruction::Add) {
3284 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3291 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3294 C == LHS ? D : C, Q, MaxRecurse - 1))
3298 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3300 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3307 } else if (A == D) {
3311 } else if (B == C) {
3332 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3339 if (C->isStrictlyPositive()) {
3340 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3342 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3345 if (C->isNonNegative()) {
3346 if (Pred == ICmpInst::ICMP_SLE)
3348 if (Pred == ICmpInst::ICMP_SGT)
3371 if (Pred == ICmpInst::ICMP_EQ)
3373 if (Pred == ICmpInst::ICMP_NE)
3382 if (Pred == ICmpInst::ICMP_UGT)
3384 if (Pred == ICmpInst::ICMP_ULE)
3395 case Instruction::Shl: {
3398 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3411 case Instruction::And:
3412 case Instruction::Or: {
3413 const APInt *C1, *C2;
3419 Pred = ICmpInst::getSwappedPredicate(Pred);
3422 if (Pred == ICmpInst::ICMP_ULE)
3424 if (Pred == ICmpInst::ICMP_UGT)
3427 if (Pred == ICmpInst::ICMP_SLE)
3429 if (Pred == ICmpInst::ICMP_SGT)
3443 case Instruction::UDiv:
3444 case Instruction::LShr:
3445 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3452 case Instruction::SDiv:
3460 case Instruction::AShr:
3467 case Instruction::Shl: {
3472 if (!NSW && ICmpInst::isSigned(Pred))
3488 unsigned MaxRecurse) {
3644 Pred = ICmpInst::getSwappedPredicate(Pred);
3650 (A == C || A == D || B == C || B == D)) {
3659 (A == C || A == D || B == C || B == D)) {
3683 CallInst *Assume = cast<CallInst>(AssumeVH);
3696 auto *II = dyn_cast<IntrinsicInst>(LHS);
3700 switch (II->getIntrinsicID()) {
3701 case Intrinsic::uadd_sat:
3705 if (Pred == ICmpInst::ICMP_UGE)
3707 if (Pred == ICmpInst::ICMP_ULT)
3711 case Intrinsic::usub_sat:
3715 if (Pred == ICmpInst::ICMP_ULE)
3717 if (Pred == ICmpInst::ICMP_UGT)
3733 if (const Argument *A = dyn_cast<Argument>(V))
3734 return A->getRange();
3735 else if (const CallBase *CB = dyn_cast<CallBase>(V))
3736 return CB->getRange();
3738 return std::nullopt;
3755 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3760 if (isa<PoisonValue>(RHS))
3789 if (LhsCr->icmp(Pred, *RhsCr))
3797 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3805 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3814 if (RI->getOperand(0)->getType() == SrcTy)
3822 if (isa<ZExtInst>(LHS)) {
3826 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3830 RI->getOperand(0), Q, MaxRecurse - 1))
3834 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3835 if (SrcOp == RI->getOperand(0)) {
3836 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3838 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3852 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3855 assert(RExt && "Constant-fold of ImmConstant should not fail");
3858 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3865 SrcOp, Trunc, Q, MaxRecurse - 1))
3875 case ICmpInst::ICMP_EQ:
3876 case ICmpInst::ICMP_UGT:
3877 case ICmpInst::ICMP_UGE:
3880 case ICmpInst::ICMP_NE:
3881 case ICmpInst::ICMP_ULT:
3882 case ICmpInst::ICMP_ULE:
3887 case ICmpInst::ICMP_SGT:
3888 case ICmpInst::ICMP_SGE:
3892 case ICmpInst::ICMP_SLT:
3893 case ICmpInst::ICMP_SLE:
3902 if (isa<SExtInst>(LHS)) {
3906 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3913 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3914 if (SrcOp == RI->getOperand(0)) {
3915 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3917 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3930 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3933 assert(RExt && "Constant-fold of ImmConstant should not fail");
3936 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3951 case ICmpInst::ICMP_EQ:
3953 case ICmpInst::ICMP_NE:
3958 case ICmpInst::ICMP_SGT:
3959 case ICmpInst::ICMP_SGE:
3963 case ICmpInst::ICMP_SLT:
3964 case ICmpInst::ICMP_SLE:
3971 case ICmpInst::ICMP_UGT:
3972 case ICmpInst::ICMP_UGE:
3980 case ICmpInst::ICMP_ULT:
3981 case ICmpInst::ICMP_ULE:
4012 ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4018 ICmpInst::getSwappedPredicate(Pred), RHS, LHS, Q))
4024 if (std::optional<bool> Res =
4033 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4034 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4035 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4039 CRHS->getPointerOperand(), Q))
4044 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4050 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4066 unsigned MaxRecurse) {
4081 if (Pred == FCmpInst::FCMP_FALSE)
4083 if (Pred == FCmpInst::FCMP_TRUE)
4088 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4111 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4117 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4125 std::optional<KnownFPClass> FullKnownClassLHS;
4129 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4131 if (FullKnownClassLHS)
4132 return *FullKnownClassLHS;
4145 FullKnownClassLHS = computeLHSClass();
4146 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4148 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4163 if (C->isNegative() && !C->isNegZero()) {
4169 case FCmpInst::FCMP_UGE:
4170 case FCmpInst::FCMP_UGT:
4171 case FCmpInst::FCMP_UNE: {
4179 case FCmpInst::FCMP_OEQ:
4180 case FCmpInst::FCMP_OLE:
4181 case FCmpInst::FCMP_OLT: {
4200 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4204 case FCmpInst::FCMP_OEQ:
4205 case FCmpInst::FCMP_UEQ:
4209 case FCmpInst::FCMP_ONE:
4210 case FCmpInst::FCMP_UNE:
4214 case FCmpInst::FCMP_OGE:
4215 case FCmpInst::FCMP_UGE:
4216 case FCmpInst::FCMP_OGT:
4217 case FCmpInst::FCMP_UGT:
4222 return ConstantInt::get(RetTy, IsMaxNum);
4223 case FCmpInst::FCMP_OLE:
4224 case FCmpInst::FCMP_ULE:
4225 case FCmpInst::FCMP_OLT:
4226 case FCmpInst::FCMP_ULT:
4231 return ConstantInt::get(RetTy, !IsMaxNum);
4243 case FCmpInst::FCMP_OGE:
4244 case FCmpInst::FCMP_ULT: {
4247 Interested |= fcNan;
4258 case FCmpInst::FCMP_UGE:
4259 case FCmpInst::FCMP_OLT: {
4276 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4282 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4295 ArrayRef<std::pair<Value *, Value *>> Ops,
4297 bool AllowRefinement,
4299 unsigned MaxRecurse) {
4301 "If AllowRefinement=false then CanUseUndef=false");
4302 for (const auto &OpAndRepOp : Ops) {
4304 if (isa<Constant>(OpAndRepOp.first))
4308 if (V == OpAndRepOp.first)
4309 return OpAndRepOp.second;
4315 auto *I = dyn_cast<Instruction>(V);
4321 if (isa<PHINode>(I))
4325 if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4329 if (isa<FreezeInst>(I))
4332 for (const auto &OpAndRepOp : Ops) {
4335 if (OpAndRepOp.first->getType()->isVectorTy() &&
4342 bool AnyReplaced = false;
4343 for (Value *InstOp : I->operands()) {
4345 InstOp, Ops, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4347 AnyReplaced = InstOp != NewInstOp;
4361 if (!AllowRefinement) {
4366 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4367 unsigned Opcode = BO->getOpcode();
4370 if (!BO->getType()->isFPOrFPVectorTy()) {
4379 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4380 NewOps[0] == NewOps[1]) {
4382 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4383 if (PDI->isDisjoint()) {
4395 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4396 NewOps[0] == NewOps[1] &&
4397 any_of(Ops, [=](const auto &Rep) { return NewOps[0] == Rep.second; }))
4408 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4410 [=](const auto &Rep) { return impliesPoison(BO, Rep.first); }))
4414 if (isa<GetElementPtrInst>(I)) {
4430 auto PreventSelfSimplify = [V](Value *Simplified) {
4431 return Simplified != V ? Simplified : nullptr;
4434 return PreventSelfSimplify(
4441 for (Value *NewOp : NewOps) {
4442 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4457 if (!AllowRefinement) {
4460 if (auto *II = dyn_cast<IntrinsicInst>(I);
4461 II && II->getIntrinsicID() == Intrinsic::abs) {
4462 if (!ConstOps[0]->isNotMinSignedValue())
4469 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4480 bool AllowRefinement,
4482 unsigned MaxRecurse) {
4484 DropFlags, MaxRecurse);
4489 bool AllowRefinement,
4493 if (!AllowRefinement)
4496 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4503 const APInt *Y, bool TrueWhenUnset) {
4510 return TrueWhenUnset ? FalseVal : TrueVal;
4516 return TrueWhenUnset ? FalseVal : TrueVal;
4518 if (Y->isPowerOf2()) {
4524 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4526 return TrueWhenUnset ? TrueVal : FalseVal;
4534 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4536 return TrueWhenUnset ? TrueVal : FalseVal;
4547 if (CmpRHS == TVal || CmpRHS == FVal) {
4549 Pred = ICmpInst::getSwappedPredicate(Pred);
4553 if (CmpLHS == FVal) {
4555 Pred = ICmpInst::getInversePredicate(Pred);
4560 Value *X = CmpLHS, *Y = CmpRHS;
4561 bool PeekedThroughSelectShuffle = false;
4562 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4563 if (Shuf && Shuf->isSelect()) {
4564 if (Shuf->getOperand(0) == Y)
4565 FVal = Shuf->getOperand(1);
4566 else if (Shuf->getOperand(1) == Y)
4567 FVal = Shuf->getOperand(0);
4570 PeekedThroughSelectShuffle = true;
4574 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4575 if (!MMI || TVal != X ||
4593 if (PeekedThroughSelectShuffle)
4621 Res->Pred == ICmpInst::ICMP_EQ);
4629 ArrayRef<std::pair<Value *, Value *>> Replacements, Value *TrueVal,
4631 Value *SimplifiedFalseVal =
4634 nullptr, MaxRecurse);
4635 if (!SimplifiedFalseVal)
4636 SimplifiedFalseVal = FalseVal;
4638 Value *SimplifiedTrueVal =
4641 nullptr, MaxRecurse);
4642 if (!SimplifiedTrueVal)
4643 SimplifiedTrueVal = TrueVal;
4645 if (SimplifiedFalseVal == SimplifiedTrueVal)
4656 unsigned MaxRecurse) {
4658 Value *CmpLHS, *CmpRHS;
4666 if (Pred == ICmpInst::ICMP_NE) {
4667 Pred = ICmpInst::ICMP_EQ;
4674 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4682 X->getType()->getScalarSizeInBits());
4688 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4702 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4715 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4716 Pred == ICmpInst::ICMP_EQ)
4721 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4733 if (Pred == ICmpInst::ICMP_EQ) {
4737 FalseVal, Q, MaxRecurse))
4742 FalseVal, Q, MaxRecurse))
4752 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4761 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4773 unsigned MaxRecurse) {
4775 Value *CmpLHS, *CmpRHS;
4780 bool IsEquiv = I->isEquivalence();
4781 if (I->isEquivalence(true)) {
4783 Pred = FCmpInst::getInversePredicate(Pred);
4799 if (CmpLHS == F && CmpRHS == T)
4802 if (CmpLHS != T || CmpRHS != F)
4808 if (Pred == FCmpInst::FCMP_OEQ)
4812 if (Pred == FCmpInst::FCMP_UNE)
4823 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4824 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4825 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4830 if (isa<PoisonValue>(CondC))
4835 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4847 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4848 "Select must have bool or bool vector condition");
4849 assert(TrueVal->getType() == FalseVal->getType() &&
4850 "Select must have same types for true/false ops");
4852 if (Cond->getType() == TrueVal->getType()) {
4915 if (TrueVal == FalseVal)
4918 if (Cond == TrueVal) {
4926 if (Cond == FalseVal) {
4940 if (isa<PoisonValue>(TrueVal) ||
4945 if (isa<PoisonValue>(FalseVal) ||
4951 if (isa<FixedVectorType>(TrueVal->getType()) &&
4955 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4957 for (unsigned i = 0; i != NumElts; ++i) {
4961 if (!TEltC || !FEltC)
4968 else if (isa<PoisonValue>(TEltC) ||
4971 else if (isa<PoisonValue>(FEltC) ||
4977 if (NewC.size() == NumElts)
4993 return *Imp ? TrueVal : FalseVal;
5010 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
5013 if (Indices.empty())
5023 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
5024 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5031 if (Ptr->getType() == GEPTy &&
5037 if (isa<PoisonValue>(Ptr) ||
5038 any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
5045 bool IsScalableVec =
5047 return isa<ScalableVectorType>(V->getType());
5050 if (Indices.size() == 1) {
5052 if (!IsScalableVec && Ty->isSized()) {
5057 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5062 if (Indices[0]->getType()->getScalarSizeInBits() ==
5064 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5065 return P->getType() == GEPTy &&
5069 if (TyAllocSize == 1 &&
5080 TyAllocSize == 1ULL << C && CanSimplify())
5096 [](Value *Idx) { return match(Idx, m_Zero()); })) {
5100 APInt BasePtrOffset(IdxWidth, 0);
5101 Value *StrippedBasePtr =
5102 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5111 !BasePtrOffset.isZero()) {
5112 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5118 !BasePtrOffset.isOne()) {
5119 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5126 if (!isa<Constant>(Ptr) ||
5127 !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
5149 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5150 if (Constant *CVal = dyn_cast<Constant>(Val))
5155 if (isa<PoisonValue>(Val) ||
5161 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5162 EV->getIndices() == Idxs) {
5165 if (isa<PoisonValue>(Agg) ||
5168 return EV->getAggregateOperand();
5171 if (Agg == EV->getAggregateOperand())
5181 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5187 auto *VecC = dyn_cast<Constant>(Vec);
5188 auto *ValC = dyn_cast<Constant>(Val);
5189 auto *IdxC = dyn_cast<Constant>(Idx);
5190 if (VecC && ValC && IdxC)
5194 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5195 if (isa<FixedVectorType>(Vec->getType()) &&
5196 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5206 if (isa<PoisonValue>(Val) ||
5211 if (VecC && ValC && VecC->getSplatValue() == ValC)
5227 if (auto *CAgg = dyn_cast<Constant>(Agg))
5231 unsigned NumIdxs = Idxs.size();
5232 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5233 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5235 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5236 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5237 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5238 Idxs.slice(0, NumCommonIdxs)) {
5239 if (NumIdxs == NumInsertValueIdxs)
5240 return IVI->getInsertedValueOperand();
5257 auto *VecVTy = cast<VectorType>(Vec->getType());
5258 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5259 if (auto *CIdx = dyn_cast<Constant>(Idx))
5273 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5275 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5276 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5279 if (IdxC->getValue().ult(MinNumElts))
5289 auto *IE = dyn_cast<InsertElementInst>(Vec);
5290 if (IE && IE->getOperand(2) == Idx)
5291 return IE->getOperand(1);
5314 Value *CommonValue = nullptr;
5315 bool HasPoisonInput = false;
5316 bool HasUndefInput = false;
5322 HasPoisonInput = true;
5327 HasUndefInput = true;
5330 if (CommonValue && Incoming != CommonValue)
5341 if (HasPoisonInput || HasUndefInput) {
5349 if (HasUndefInput &&
5360 if (auto *C = dyn_cast<Constant>(Op))
5363 if (auto *CI = dyn_cast<CastInst>(Op)) {
5364 auto *Src = CI->getOperand(0);
5365 Type *SrcTy = Src->getType();
5366 Type *MidTy = CI->getType();
5368 if (Src->getType() == Ty) {
5369 auto FirstOp = CI->getOpcode();
5378 SrcIntPtrTy, MidIntPtrTy,
5379 DstIntPtrTy) == Instruction::BitCast)
5385 if (CastOpc == Instruction::BitCast)
5386 if (Op->getType() == Ty)
5391 if (CastOpc == Instruction::PtrToInt &&
5409 int MaskVal, Value *RootVec,
5410 unsigned MaxRecurse) {
5420 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5421 int RootElt = MaskVal;
5422 Value *SourceOp = Op0;
5423 if (MaskVal >= InVecNumElts) {
5424 RootElt = MaskVal - InVecNumElts;
5430 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5432 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5433 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5442 if (RootVec != SourceOp)
5447 if (RootElt != DestElt)
5456 unsigned MaxRecurse) {
5460 auto *InVecTy = cast<VectorType>(Op0->getType());
5461 unsigned MaskNumElts = Mask.size();
5462 ElementCount InVecEltCount = InVecTy->getElementCount();
5467 Indices.assign(Mask.begin(), Mask.end());
5472 bool MaskSelects0 = false, MaskSelects1 = false;
5474 for (unsigned i = 0; i != MaskNumElts; ++i) {
5475 if (Indices[i] == -1)
5477 if ((unsigned)Indices[i] < InVecNumElts)
5478 MaskSelects0 = true;
5480 MaskSelects1 = true;
5488 auto *Op0Const = dyn_cast<Constant>(Op0);
5489 auto *Op1Const = dyn_cast<Constant>(Op1);
5494 if (Op0Const && Op1Const)
5500 if (!Scalable && Op0Const && !Op1Const) {
5518 if (all_of(Indices, [InsertIndex](int MaskElt) {
5519 return MaskElt == InsertIndex || MaskElt == -1;
5521 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5525 for (unsigned i = 0; i != MaskNumElts; ++i)
5526 if (Indices[i] == -1)
5534 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5554 Value *RootVec = nullptr;
5555 for (unsigned i = 0; i != MaskNumElts; ++i) {
5577 if (auto *C = dyn_cast<Constant>(Op))
5605 Type *Ty = In->getType();
5606 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5607 unsigned NumElts = VecTy->getNumElements();
5609 for (unsigned i = 0; i != NumElts; ++i) {
5610 Constant *EltC = In->getAggregateElement(i);
5613 if (EltC && isa<PoisonValue>(EltC))
5615 else if (EltC && EltC->isNaN())
5616 NewC[i] = ConstantFP::get(
5617 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5631 if (isa<ScalableVectorType>(Ty)) {
5632 auto *Splat = In->getSplatValue();
5634 "Found a scalable-vector NaN but not a splat");
5640 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5655 for (Value *V : Ops) {
5663 if (FMF.noNaNs() && (IsNan || IsUndef))
5665 if (FMF.noInfs() && (IsInf || IsUndef))
5691 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5757 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5874 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5880 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5887 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5895 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5903 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5911 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5919 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5944 return ConstantFP::get(Op0->getType(), 1.0);
5956 return ConstantFP::get(Op0->getType(), -1.0);
5970 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5978 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
6008 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6017 unsigned MaxRecurse) {
6019 case Instruction::FNeg:
6031 unsigned MaxRecurse) {
6033 case Instruction::FNeg:
6054 case Instruction::Add:
6057 case Instruction::Sub:
6060 case Instruction::Mul:
6063 case Instruction::SDiv:
6065 case Instruction::UDiv:
6067 case Instruction::SRem:
6069 case Instruction::URem:
6071 case Instruction::Shl:
6074 case Instruction::LShr:
6076 case Instruction::AShr:
6078 case Instruction::And:
6080 case Instruction::Or:
6082 case Instruction::Xor:
6084 case Instruction::FAdd:
6086 case Instruction::FSub:
6088 case Instruction::FMul:
6090 case Instruction::FDiv:
6092 case Instruction::FRem:
6104 unsigned MaxRecurse) {
6106 case Instruction::FAdd:
6108 case Instruction::FSub:
6110 case Instruction::FMul:
6112 case Instruction::FDiv:
6148 case Intrinsic::fabs:
6149 case Intrinsic::floor:
6150 case Intrinsic::ceil:
6151 case Intrinsic::trunc:
6152 case Intrinsic::rint:
6153 case Intrinsic::nearbyint:
6154 case Intrinsic::round:
6155 case Intrinsic::roundeven:
6156 case Intrinsic::canonicalize:
6157 case Intrinsic::arithmetic_fence:
6169 case Intrinsic::floor:
6170 case Intrinsic::ceil:
6171 case Intrinsic::trunc:
6172 case Intrinsic::rint:
6173 case Intrinsic::nearbyint:
6174 case Intrinsic::round:
6175 case Intrinsic::roundeven:
6189 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6190 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6194 DL.getIndexTypeSizeInBits(Ptr->getType()));
6195 if (OffsetInt.srem(4) != 0)
6203 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6207 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6208 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6213 if (LoadedCE->getOpcode() != Instruction::Sub)
6216 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6217 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6219 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6223 APInt LoadedRHSOffset;
6226 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6229 return LoadedLHSPtr;
6237 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6260 if (C && (C->isZero() || C->isInfinity()))
6269 if (C && C->isNaN())
6270 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6288 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6289 if (II->getIntrinsicID() == IID)
6298 auto *II = dyn_cast<IntrinsicInst>(Op0);
6306 case Intrinsic::fabs:
6310 case Intrinsic::bswap:
6315 case Intrinsic::bitreverse:
6320 case Intrinsic::ctpop: {
6323 return ConstantInt::get(Op0->getType(), 1);
6332 case Intrinsic::exp:
6334 if (Call->hasAllowReassoc() &&
6338 case Intrinsic::exp2:
6340 if (Call->hasAllowReassoc() &&
6344 case Intrinsic::exp10:
6346 if (Call->hasAllowReassoc() &&
6350 case Intrinsic::log:
6352 if (Call->hasAllowReassoc() &&
6356 case Intrinsic::log2:
6358 if (Call->hasAllowReassoc() &&
6364 case Intrinsic::log10:
6367 if (Call->hasAllowReassoc() &&
6373 case Intrinsic::vector_reverse:
6396 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6401 if (Op1 == X || Op1 == Y ||
6418 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6419 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6420 "Unsupported intrinsic");
6422 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6426 if (!M0 || M0->getIntrinsicID() != IID)
6428 Value *X0 = M0->getOperand(0);
6429 Value *Y0 = M0->getOperand(1);
6436 if (X0 == Op1 || Y0 == Op1)
6439 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6442 Value *X1 = M1->getOperand(0);
6443 Value *Y1 = M1->getOperand(1);
6451 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6462 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6464 case Intrinsic::abs:
6472 case Intrinsic::cttz: {
6478 case Intrinsic::ctlz: {
6486 case Intrinsic::ptrmask: {
6494 "Invalid mask width");
6511 APInt IrrelevantPtrBits =
6514 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6516 if (C != nullptr && C->isAllOnesValue())
6521 case Intrinsic::smax:
6522 case Intrinsic::smin:
6523 case Intrinsic::umax:
6524 case Intrinsic::umin: {
6535 return ConstantInt::get(
6543 return ConstantInt::get(ReturnType, *C);
6554 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6555 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6557 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6558 const APInt *InnerC;
6561 ICmpInst::getNonStrictPredicate(
6581 case Intrinsic::scmp:
6582 case Intrinsic::ucmp: {
6589 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6591 return ConstantInt::get(ReturnType, 1);
6594 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6600 case Intrinsic::usub_with_overflow:
6601 case Intrinsic::ssub_with_overflow:
6608 case Intrinsic::uadd_with_overflow:
6609 case Intrinsic::sadd_with_overflow:
6614 cast<StructType>(ReturnType),
6619 case Intrinsic::umul_with_overflow:
6620 case Intrinsic::smul_with_overflow:
6630 case Intrinsic::uadd_sat:
6636 case Intrinsic::sadd_sat:
6651 case Intrinsic::usub_sat:
6656 case Intrinsic::ssub_sat:
6664 case Intrinsic::load_relative:
6665 if (auto *C0 = dyn_cast<Constant>(Op0))
6666 if (auto *C1 = dyn_cast<Constant>(Op1))
6669 case Intrinsic::powi:
6670 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6672 if (Power->isZero())
6673 return ConstantFP::get(Op0->getType(), 1.0);
6679 case Intrinsic::ldexp:
6681 case Intrinsic::copysign:
6691 case Intrinsic::is_fpclass: {
6692 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6695 return ConstantInt::get(ReturnType, true);
6697 return ConstantInt::get(ReturnType, false);
6702 case Intrinsic::maxnum:
6703 case Intrinsic::minnum:
6704 case Intrinsic::maximum:
6705 case Intrinsic::minimum: {
6711 if (isa<Constant>(Op0))
6718 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6719 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6726 return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6732 (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) {
6737 if (C->isNegative() == IsMin &&
6738 (!PropagateNaN || (Call && Call->hasNoNaNs())))
6739 return ConstantFP::get(ReturnType, *C);
6745 if (C->isNegative() != IsMin &&
6746 (PropagateNaN || (Call && Call->hasNoNaNs())))
6759 case Intrinsic::vector_extract: {
6761 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6765 IdxN == 0 && X->getType() == ReturnType)
6781 assert(Call->arg_size() == Args.size());
6782 unsigned NumOperands = Args.size();
6787 any_of(Args, IsaPred<PoisonValue>))
6793 case Intrinsic::vscale: {
6797 return ConstantInt::get(RetTy, C->getZExtValue());
6805 if (NumOperands == 1)
6808 if (NumOperands == 2)
6814 case Intrinsic::masked_load:
6815 case Intrinsic::masked_gather: {
6816 Value *MaskArg = Args[2];
6817 Value *PassthruArg = Args[3];
6823 case Intrinsic::fshl:
6824 case Intrinsic::fshr: {
6825 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6833 return Args[IID == Intrinsic::fshl ? 0 : 1];
6835 const APInt *ShAmtC;
6840 return Args[IID == Intrinsic::fshl ? 0 : 1];
6845 return ConstantInt::getNullValue(F->getReturnType());
6849 return ConstantInt::getAllOnesValue(F->getReturnType());
6853 case Intrinsic::experimental_constrained_fma: {
6854 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6856 *FPI->getRoundingMode()))
6860 case Intrinsic::fma:
6861 case Intrinsic::fmuladd: {
6863 RoundingMode::NearestTiesToEven))
6867 case Intrinsic::smul_fix:
6868 case Intrinsic::smul_fix_sat: {
6869 Value *Op0 = Args[0];
6870 Value *Op1 = Args[1];
6871 Value *Op2 = Args[2];
6872 Type *ReturnType = F->getReturnType();
6877 if (isa<Constant>(Op0))
6891 cast<ConstantInt>(Op2)->getZExtValue());
6897 case Intrinsic::vector_insert: {
6898 Value *Vec = Args[0];
6899 Value *SubVec = Args[1];
6901 Type *ReturnType = F->getReturnType();
6905 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6910 X->getType() == ReturnType)
6915 case Intrinsic::experimental_constrained_fadd: {
6916 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6918 *FPI->getExceptionBehavior(),
6919 *FPI->getRoundingMode());
6921 case Intrinsic::experimental_constrained_fsub: {
6922 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6924 *FPI->getExceptionBehavior(),
6925 *FPI->getRoundingMode());
6927 case Intrinsic::experimental_constrained_fmul: {
6928 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6930 *FPI->getExceptionBehavior(),
6931 *FPI->getRoundingMode());
6933 case Intrinsic::experimental_constrained_fdiv: {
6934 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6936 *FPI->getExceptionBehavior(),
6937 *FPI->getRoundingMode());
6939 case Intrinsic::experimental_constrained_frem: {
6940 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6942 *FPI->getExceptionBehavior(),
6943 *FPI->getRoundingMode());
6945 case Intrinsic::experimental_constrained_ldexp:
6947 case Intrinsic::experimental_gc_relocate: {
6953 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
6957 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
6961 if (isa<ConstantPointerNull>(DerivedPtr)) {
6968 case Intrinsic::experimental_vp_reverse: {
6969 Value *Vec = Call->getArgOperand(0);
6970 Value *Mask = Call->getArgOperand(1);
6971 Value *EVL = Call->getArgOperand(2);
6976 match(Vec, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
6993 auto *F = dyn_cast<Function>(Callee);
6998 ConstantArgs.reserve(Args.size());
6999 for (Value *Arg : Args) {
7002 if (isa<MetadataAsValue>(Arg))
7015 assert(Call->arg_size() == Args.size());
7019 if (Call->isMustTailCall())
7024 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
7030 auto *F = dyn_cast<Function>(Callee);
7031 if (F && F->isIntrinsic())
7039 assert(isa<ConstrainedFPIntrinsic>(Call));
7058 return ::simplifyFreezeInst(Op0, Q);
7066 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7072 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7103 unsigned MaxRecurse) {
7104 assert(I->getFunction() && "instruction should be inserted in a function");
7106 "context instruction should be in the same function");
7110 switch (I->getOpcode()) {
7115 [](Value *V) { return cast<Constant>(V); });
7119 case Instruction::FNeg:
7121 case Instruction::FAdd:
7124 case Instruction::Add:
7128 case Instruction::FSub:
7131 case Instruction::Sub:
7135 case Instruction::FMul:
7138 case Instruction::Mul:
7142 case Instruction::SDiv:
7146 case Instruction::UDiv:
7150 case Instruction::FDiv:
7153 case Instruction::SRem:
7155 case Instruction::URem:
7157 case Instruction::FRem:
7160 case Instruction::Shl:
7164 case Instruction::LShr:
7168 case Instruction::AShr:
7172 case Instruction::And:
7174 case Instruction::Or:
7176 case Instruction::Xor:
7178 case Instruction::ICmp:
7180 NewOps[1], Q, MaxRecurse);
7181 case Instruction::FCmp:
7183 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7184 case Instruction::Select:
7186 case Instruction::GetElementPtr: {
7187 auto *GEPI = cast<GetElementPtrInst>(I);
7189 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7192 case Instruction::InsertValue: {
7197 case Instruction::InsertElement:
7199 case Instruction::ExtractValue: {
7200 auto *EVI = cast<ExtractValueInst>(I);
7204 case Instruction::ExtractElement:
7206 case Instruction::ShuffleVector: {
7207 auto *SVI = cast<ShuffleVectorInst>(I);
7209 SVI->getShuffleMask(), SVI->getType(), Q,
7212 case Instruction::PHI:
7214 case Instruction::Call:
7216 cast<CallInst>(I), NewOps.back(),
7217 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7218 case Instruction::Freeze:
7220 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7221 #include "llvm/IR/Instruction.def"
7222 #undef HANDLE_CAST_INST
7225 case Instruction::Alloca:
7228 case Instruction::Load:
7237 "Number of operands should match the instruction!");
7238 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7268 bool Simplified = false;
7275 for (User *U : I->users())
7277 Worklist.insert(cast<Instruction>(U));
7280 I->replaceAllUsesWith(SimpleV);
7282 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7283 I->eraseFromParent();
7295 if (UnsimplifiedUsers)
7296 UnsimplifiedUsers->insert(I);
7305 for (User *U : I->users())
7306 Worklist.insert(cast<Instruction>(U));
7309 I->replaceAllUsesWith(SimpleV);
7311 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7312 I->eraseFromParent();
7321 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7322 assert(SimpleV && "Must provide a simplified value.");
7330 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7332 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7335 return {F.getDataLayout(), TLI, DT, AC};
7343 template <class T, class... TArgs>
7346 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7347 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7348 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7349 return {F.getDataLayout(), TLI, DT, AC};
7363 void InstSimplifyFolder::anchor() {}
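For orientation, and not part of the listing above: callers of this file normally construct a SimplifyQuery from whatever analyses they have on hand, exactly as the getBestSimplifyQuery helpers above do with {F.getDataLayout(), TLI, DT, AC}, and then ask llvm::simplifyInstruction for each instruction, replacing uses whenever a simpler existing value comes back. The sketch below is a minimal illustration of that pattern; the driver name simplifyAll is our own, and erasing the now-dead instructions afterwards is left out.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"

using namespace llvm;

// Hypothetical driver: fold every instruction in F that InstSimplify can
// already prove equal to an existing value.
static bool simplifyAll(Function &F, const TargetLibraryInfo *TLI,
                        const DominatorTree *DT, AssumptionCache *AC) {
  // Mirrors the {DL, TLI, DT, AC} construction used by getBestSimplifyQuery.
  const SimplifyQuery SQ(F.getDataLayout(), TLI, DT, AC);
  bool Changed = false;
  for (Instruction &I : instructions(F)) {
    // simplifyInstruction returns an existing, simpler Value or nullptr.
    if (Value *V = simplifyInstruction(&I, SQ.getWithInstruction(&I))) {
      I.replaceAllUsesWith(V);
      Changed = true;
    }
  }
  return Changed;
}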
static Value * simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q)
Given operands for a Freeze, see if we can fold the result.
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
static Value * simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
static Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
static Value * simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distinct storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, const SimplifyQuery &Q, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
static Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
static Value * simplifySelectInst(Value *, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a SelectInst, see if we can fold the result.
static Value * simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
static Value * simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &SQ, unsigned MaxRecurse)
See if we can compute a simplified version of this instruction.
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyWithOpsReplaced(Value *V, ArrayRef< std::pair< Value *, Value * > > Ops, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
static Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
static Value * simplifyCmpInst(CmpPredicate, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
static Value * simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
static Value * simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, see if we can fold the result.
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'op' over 'opex'.
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not an integral type).
static Value * simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifySelectWithEquivalence(ArrayRef< std::pair< Value *, Value * > > Replacements, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-point equivalence comparison.
static Value * simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
static Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given the operand for an FNeg, see if we can fold the result.
static Value * simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an Or, see if we can fold the result.
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one operand of the compare is a constant.
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, see if we can fold the result.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the binop by seeing whether evaluating it on the incoming phi values yields the same result for every value.
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
static Value * simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, see if we can fold the result.
static Value * simplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a Xor, see if we can fold the result.
static Value * simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing whether both branches of the select result in the same value.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias functions.
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static Value * simplifyGEPInst(Type *, Value *, ArrayRef< Value * >, GEPNoWrapFlags, const SimplifyQuery &, unsigned)
Given operands for a GetElementPtrInst, see if we can fold the result.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted constant (identity + not).
static Value * simplifySelectWithBitTest(Value *CondVal, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not.
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, unsigned)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
static Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
static Value * simplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an And, see if we can fold the result.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop by seeing whether evaluating it on both branches of the select results in the same value.
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
static Value * simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given the operands for an FMul, see if we can fold the result.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection, full set union, or if one compare is a superset of the other.
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/max intrinsic with a shared operand.
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
This header provides classes for managing per-loop analyses.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metrics from passes.
#define STATISTIC(VARNAME, DESC)
Class for arbitrary precision integers.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are both set.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is strictly positive (> 0).
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool getBoolValue() const
Convert APInt to a boolean value.
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isMask(unsigned numBits) const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0).
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isOne() const
Determine if this is a value of 1.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
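The APInt entries above are easiest to read with a concrete value in hand. The following standalone sketch is not part of InstructionSimplify.cpp; it only assumes llvm/ADT/APInt.h and exercises a few of the listed queries.

#include "llvm/ADT/APInt.h"
#include <cassert>

static void apintQueryExamples() {
  llvm::APInt A(/*numBits=*/8, /*val=*/12); // 0b00001100
  assert(!A.isPowerOf2());                  // 12 has two bits set
  assert(A.countr_zero() == 2);             // two trailing zero bits
  assert(A.getActiveBits() == 4);           // highest set bit is bit 3

  llvm::APInt B = llvm::APInt::getLowBitsSet(8, 4); // 0b00001111
  assert(A.intersects(B));                  // they share set bits
  assert(A.isSubsetOf(B));                  // every bit set in A is set in B
  assert(A.ult(B) && B.uge(A));             // 12 u< 15
}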
An instruction to allocate memory on the stack.
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
BinaryOps getOpcode() const
Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
bool isFPPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
bool isIntPredicate() const
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e. a constant C such that X op C = C for every X.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
static LLVM_ABI Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
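As a hedged illustration of the ConstantRange helpers listed above (not code from this file), the exact region for an unsigned-less-than compare can be built and queried like this:

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include <cassert>

static void constantRangeExample() {
  // All 8-bit values x satisfying "x u< 8", i.e. the half-open range [0, 8).
  llvm::ConstantRange R = llvm::ConstantRange::makeExactICmpRegion(
      llvm::CmpInst::ICMP_ULT, llvm::APInt(8, 8));
  assert(R.contains(llvm::APInt(8, 3)));
  assert(!R.contains(llvm::APInt(8, 9)));
  assert(R.inverse().contains(llvm::APInt(8, 200))); // complement: [8, 0)
  assert(!R.isEmptySet() && !R.isFullSet());
}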
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN elements.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the given address space.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
bool allowReassoc() const
Flag queries.
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic: they operate on fixed-bitwidth values, so there is a certain threshold value at which the result saturates.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have swapped position.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
This class represents zero extension of integer types.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
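The m_* entries above are the PatternMatch combinators used throughout the simplification code. A minimal usage sketch (illustrative only; matchAddOfZero is a made-up helper name, not an LLVM API):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Returns X if V has the shape "add X, 0" with the operands in either order,
// and nullptr otherwise.
static llvm::Value *matchAddOfZero(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  if (match(V, m_c_Add(m_Value(X), m_ZeroInt())))
    return X;
  return nullptr;
}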
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic, I, at the point in the control-flow graph specified by CxtI.
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
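For example, "icmp slt X, 0" inspects only the sign bit of X. A small hedged sketch, assuming the declaration above lives in llvm/Analysis/ValueTracking.h:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

static void signBitCheckExample() {
  bool TrueIfSigned = false;
  // "icmp slt X, 0" is a sign-bit test that is true exactly when the bit is set.
  bool IsCheck = llvm::isSignBitCheck(llvm::ICmpInst::ICMP_SLT,
                                      llvm::APInt(32, 0), TrueIfSigned);
  assert(IsCheck && TrueIfSigned);
}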
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
LLVM_ABI Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
LLVM_ABI Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
LLVM_ABI Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
LLVM_ABI bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
LLVM_ABI Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
LLVM_ABI Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
LLVM_ABI Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
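A typical caller pattern for simplifyInstruction, shown as a hedged sketch rather than code from this file: if a simpler value is found, forward the uses to it and drop the original instruction.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"

static bool foldIfPossible(llvm::Instruction *I, const llvm::SimplifyQuery &Q) {
  if (llvm::Value *V = llvm::simplifyInstruction(I, Q)) {
    I->replaceAllUsesWith(V); // a returned value is a strictly simpler replacement for I
    I->eraseFromParent();
    return true;
  }
  return false;
}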
LLVM_ABI Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
LLVM_ABI bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
LLVM_ABI Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
LLVM_ABI Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
LLVM_ABI Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deemed equal.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
LLVM_ABI Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
LLVM_ABI Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of an FMA, fold the result or return null.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its true/false operands would match.
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
std::optional< DecomposedBitTest > decomposeBitTest(Value *Cond, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
LLVM_ABI Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register, for example if it was inserted then extracted from the vector.
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
LLVM_ABI Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
LLVM_ABI Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
LLVM_ABI Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
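A hedged sketch of the kind of question simplifyWithOpReplaced answers (the call itself is the documented API above; the helper name is illustrative): does V fold to something simpler if one of its operands is assumed to be zero?

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"

static llvm::Value *simplifyAssumingOpIsZero(llvm::Value *V, llvm::Value *Op,
                                             const llvm::SimplifyQuery &Q) {
  llvm::Constant *Zero = llvm::Constant::getNullValue(Op->getType());
  // With AllowRefinement=true the fold may refine the original value
  // (e.g. replace a potentially-poison result with a defined constant).
  return llvm::simplifyWithOpReplaced(V, Op, Zero, Q,
                                      /*AllowRefinement=*/true);
}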
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be false or undef.
LLVM_ABI Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
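A sketch of folding a load from a constant global at a fixed byte offset (the helper name and headers are assumptions):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Read an i32 at byte offset ByteOff from a constant object, e.g. element 1
// of "@g = constant [4 x i32] ..." at offset 4; returns null if the value
// cannot be determined.
static Constant *loadI32AtOffset(Constant *GlobalPtr, uint64_t ByteOff,
                                 LLVMContext &Ctx, const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(GlobalPtr->getType()), ByteOff);
  return ConstantFoldLoadFromConstPtr(GlobalPtr, Type::getInt32Ty(Ctx),
                                      Offset, DL);
}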
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
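An illustrative sketch of the classic rem-to-mask legality check (helper name assumed):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// "urem X, Y" can be rewritten as "and X, Y-1" only when Y is a power of two.
// OrZero=false requires a genuine power of two, not "power of two or zero".
static bool canLowerURemToMask(const Value *Divisor, const DataLayout &DL) {
  return isKnownToBeAPowerOfTwo(Divisor, DL, /*OrZero=*/false);
}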
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
std::pair< Value *, FPClassTest > fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
LLVM_ABI Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
LLVM_ABI Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
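A hedged sketch of how a pass might use this to prove a dominated check redundant (helper name assumed):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include <optional>
using namespace llvm;

// If the condition already known true on this path implies Cond (e.g.
// "x > 10" implies "x > 5"), the second check is redundant. Returns
// true/false when the implication (or its negation) is provable,
// std::nullopt otherwise.
static std::optional<bool> impliedOnPath(const Value *PathCond,
                                         const Value *Cond,
                                         const DataLayout &DL) {
  return isImpliedCondition(PathCond, Cond, DL, /*LHSIsTrue=*/true);
}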
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This callback is used in conjunction with PointerMayBeCaptured.
@ Continue
Continue traversal, and also follow the return value of the user if it has additional capture compone...
@ Stop
Stop the traversal.
virtual Action captured(const Use *U, UseCaptureInfo CI)=0
Use U directly captures CI.UseCC and additionally CI.ResultCC through the return value of the user of...
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
bool hasConflict() const
Returns true if there is conflicting information.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
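To tie the KnownBits accessors above together, a sketch that combines them with the usual computeKnownBits entry point from ValueTracking.h (that entry point is assumed here; it is not part of the list above):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// Prove that V is a multiple of 2^Log2Align and no larger than Bound:
// countMinTrailingZeros() gives guaranteed low zero bits, and getMaxValue()
// is the largest unsigned value consistent with the known bits.
static bool provablyAlignedAndBounded(const Value *V, unsigned Log2Align,
                                      uint64_t Bound, const DataLayout &DL) {
  KnownBits Known = computeKnownBits(V, DL);
  return Known.countMinTrailingZeros() >= Log2Align &&
         Known.getMaxValue().ule(Bound);
}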
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a NaN.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a NaN.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if the given SelectPatternFlavor is one of the min/max patterns. (Whether the fcmp in an fcmp+select min/max must be ordered is tracked by the separate Ordered flag.)
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of un...
SimplifyQuery getWithInstruction(const Instruction *I) const
LLVM_ABI bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const
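A short sketch of how these SimplifyQuery helpers compose (the wrapper itself is illustrative):

#include "llvm/Analysis/SimplifyQuery.h"
using namespace llvm;

// Re-anchor an existing query at a specific instruction, and optionally drop
// undef-based reasoning when a transform must hold for every concrete value
// an undef operand could take.
static SimplifyQuery queryFor(const SimplifyQuery &Q, const Instruction *I,
                              bool ForbidUndef) {
  SimplifyQuery AtI = Q.getWithInstruction(I);
  return ForbidUndef ? AtI.getWithoutUndef() : AtI;
}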
Capture information for a specific Use.