36using namespace PatternMatch;
38#define DEBUG_TYPE "instcombine"
46 bool IsSigned =
false) {
49 Result = In1.
sadd_ov(In2, Overflow);
51 Result = In1.
uadd_ov(In2, Overflow);
59 bool IsSigned =
false) {
62 Result = In1.
ssub_ov(In2, Overflow);
64 Result = In1.
usub_ov(In2, Overflow);
72 for (
auto *U :
I.users())
73 if (isa<BranchInst>(U))
83 if (!ICmpInst::isSigned(Pred))
90 if (Pred == ICmpInst::ICMP_SLT) {
91 Pred = ICmpInst::ICMP_SLE;
94 }
else if (
C.isAllOnes()) {
95 if (Pred == ICmpInst::ICMP_SGT) {
96 Pred = ICmpInst::ICMP_SGE;
120 Type *GEPSrcEltTy =
GEP->getSourceElementType();
127 if (!isa<ConstantArray>(
Init) && !isa<ConstantDataArray>(
Init))
130 uint64_t ArrayElementCount =
Init->getType()->getArrayNumElements();
141 unsigned GEPIdxOp = 1;
142 if (
GEP->getSourceElementType()->isArrayTy()) {
147 if (
GEP->getNumOperands() < GEPIdxOp + 1 ||
148 isa<Constant>(
GEP->getOperand(GEPIdxOp)))
156 Type *EltTy =
Init->getType()->getArrayElementType();
157 for (
unsigned i = GEPIdxOp + 1, e =
GEP->getNumOperands(); i != e; ++i) {
163 if ((
unsigned)IdxVal != IdxVal)
166 if (
StructType *STy = dyn_cast<StructType>(EltTy))
167 EltTy = STy->getElementType(IdxVal);
168 else if (
ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
169 if (IdxVal >= ATy->getNumElements())
171 EltTy = ATy->getElementType();
184 enum { Overdefined = -3, Undefined = -2 };
193 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
197 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
205 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
214 for (
unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
220 if (!LaterIndices.
empty()) {
235 CompareRHS,
DL, &
TLI);
240 if (isa<UndefValue>(
C)) {
243 if (TrueRangeEnd == (
int)i - 1)
245 if (FalseRangeEnd == (
int)i - 1)
252 if (!isa<ConstantInt>(
C))
257 bool IsTrueForElt = !cast<ConstantInt>(
C)->isZero();
262 if (FirstTrueElement == Undefined)
263 FirstTrueElement = TrueRangeEnd = i;
266 if (SecondTrueElement == Undefined)
267 SecondTrueElement = i;
269 SecondTrueElement = Overdefined;
272 if (TrueRangeEnd == (
int)i - 1)
275 TrueRangeEnd = Overdefined;
279 if (FirstFalseElement == Undefined)
280 FirstFalseElement = FalseRangeEnd = i;
283 if (SecondFalseElement == Undefined)
284 SecondFalseElement = i;
286 SecondFalseElement = Overdefined;
289 if (FalseRangeEnd == (
int)i - 1)
292 FalseRangeEnd = Overdefined;
297 if (i < 64 && IsTrueForElt)
298 MagicBitvector |= 1ULL << i;
303 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
304 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
305 FalseRangeEnd == Overdefined)
319 unsigned ElementSize =
332 if (SecondTrueElement != Overdefined) {
335 if (FirstTrueElement == Undefined)
338 Value *FirstTrueIdx = ConstantInt::get(
Idx->getType(), FirstTrueElement);
341 if (SecondTrueElement == Undefined)
346 Value *SecondTrueIdx = ConstantInt::get(
Idx->getType(), SecondTrueElement);
348 return BinaryOperator::CreateOr(C1, C2);
353 if (SecondFalseElement != Overdefined) {
356 if (FirstFalseElement == Undefined)
359 Value *FirstFalseIdx = ConstantInt::get(
Idx->getType(), FirstFalseElement);
362 if (SecondFalseElement == Undefined)
367 Value *SecondFalseIdx =
368 ConstantInt::get(
Idx->getType(), SecondFalseElement);
370 return BinaryOperator::CreateAnd(C1, C2);
375 if (TrueRangeEnd != Overdefined) {
376 assert(TrueRangeEnd != FirstTrueElement &&
"Should emit single compare");
380 if (FirstTrueElement) {
381 Value *Offs = ConstantInt::get(
Idx->getType(), -FirstTrueElement);
386 ConstantInt::get(
Idx->getType(), TrueRangeEnd - FirstTrueElement + 1);
391 if (FalseRangeEnd != Overdefined) {
392 assert(FalseRangeEnd != FirstFalseElement &&
"Should emit single compare");
395 if (FirstFalseElement) {
396 Value *Offs = ConstantInt::get(
Idx->getType(), -FirstFalseElement);
401 ConstantInt::get(
Idx->getType(), FalseRangeEnd - FirstFalseElement);
414 if (ArrayElementCount <= Idx->
getType()->getIntegerBitWidth())
448 while (!WorkList.
empty()) {
451 while (!WorkList.
empty()) {
452 if (Explored.
size() >= 100)
462 if (!isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
467 if (
auto *
GEP = dyn_cast<GEPOperator>(V)) {
469 auto IsNonConst = [](
Value *V) {
return !isa<ConstantInt>(V); };
470 if (!
GEP->isInBounds() ||
count_if(
GEP->indices(), IsNonConst) > 1)
478 if (WorkList.
back() == V) {
484 if (
auto *PN = dyn_cast<PHINode>(V)) {
486 if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
494 for (
auto *PN : PHIs)
495 for (
Value *
Op : PN->incoming_values())
503 for (
Value *Val : Explored) {
506 auto *
PHI = dyn_cast<PHINode>(
Use);
507 auto *Inst = dyn_cast<Instruction>(Val);
509 if (Inst ==
Base || Inst ==
PHI || !Inst || !
PHI ||
513 if (
PHI->getParent() == Inst->getParent())
523 bool Before =
true) {
524 if (
auto *
PHI = dyn_cast<PHINode>(V)) {
529 if (
auto *
I = dyn_cast<Instruction>(V)) {
531 I = &*std::next(
I->getIterator());
535 if (
auto *
A = dyn_cast<Argument>(V)) {
537 BasicBlock &Entry =
A->getParent()->getEntryBlock();
543 assert(isa<Constant>(V) &&
"Setting insertion point for unknown value!");
560 Base->getContext(),
DL.getIndexTypeSizeInBits(Start->getType()));
566 for (
Value *Val : Explored) {
571 if (
auto *
PHI = dyn_cast<PHINode>(Val))
574 PHI->getName() +
".idx",
PHI->getIterator());
579 for (
Value *Val : Explored) {
583 if (
auto *
GEP = dyn_cast<GEPOperator>(Val)) {
587 if (isa<ConstantInt>(
Op) && cast<ConstantInt>(
Op)->
isZero())
588 NewInsts[
GEP] = OffsetV;
591 Op, OffsetV,
GEP->getOperand(0)->getName() +
".add",
596 if (isa<PHINode>(Val))
603 for (
Value *Val : Explored) {
608 if (
auto *
PHI = dyn_cast<PHINode>(Val)) {
610 for (
unsigned I = 0, E =
PHI->getNumIncomingValues();
I < E; ++
I) {
611 Value *NewIncoming =
PHI->getIncomingValue(
I);
613 auto It = NewInsts.
find(NewIncoming);
614 if (It != NewInsts.
end())
615 NewIncoming = It->second;
622 for (
Value *Val : Explored) {
629 Val->getName() +
".ptr", NW);
636 return NewInsts[Start];
698 if (!isa<GetElementPtrInst>(
RHS))
726 EmitGEPOffsets(
Base.LHSGEPs,
Base.LHSNW, IdxTy,
true);
732 isa<Constant>(
RHS) && cast<Constant>(
RHS)->isNullValue() &&
754 auto EC = cast<VectorType>(GEPLHS->
getType())->getElementCount();
759 cast<Constant>(
RHS),
Base->getType()));
765 if (GEPLHS->
getOperand(0) != GEPRHS->getOperand(0)) {
766 bool IndicesTheSame =
769 GEPRHS->getPointerOperand()->getType() &&
773 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
774 IndicesTheSame =
false;
780 if (IndicesTheSame &&
788 if (GEPLHS->
isInBounds() && GEPRHS->isInBounds() &&
790 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
794 Value *LOffset = EmitGEPOffset(GEPLHS);
795 Value *ROffset = EmitGEPOffset(GEPRHS);
802 if (LHSIndexTy != RHSIndexTy) {
816 if (GEPLHS->
getOperand(0) == GEPRHS->getOperand(0) &&
820 unsigned NumDifferences = 0;
821 unsigned DiffOperand = 0;
822 for (
unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
823 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
825 Type *RHSType = GEPRHS->getOperand(i)->getType();
836 if (NumDifferences++)
841 if (NumDifferences == 0)
849 Value *RHSV = GEPRHS->getOperand(DiffOperand);
850 return NewICmp(NW, LHSV, RHSV);
858 EmitGEPOffsets(
Base.LHSGEPs,
Base.LHSNW, IdxTy,
true);
860 EmitGEPOffsets(
Base.RHSGEPs,
Base.RHSNW, IdxTy,
true);
861 return NewICmp(
Base.LHSNW &
Base.RHSNW, L, R);
887 bool Captured =
false;
892 CmpCaptureTracker(
AllocaInst *Alloca) : Alloca(Alloca) {}
894 void tooManyUses()
override { Captured =
true; }
898 auto *ICmp = dyn_cast<ICmpInst>(U->getUser());
906 ICmps[ICmp] |= 1u << U->getOperandNo();
915 CmpCaptureTracker Tracker(Alloca);
917 if (Tracker.Captured)
920 bool Changed =
false;
921 for (
auto [ICmp,
Operands] : Tracker.ICmps) {
927 auto *Res = ConstantInt::get(ICmp->getType(),
953 assert(!!
C &&
"C should not be zero!");
969 ConstantInt::get(
X->getType(), -
C));
981 ConstantInt::get(
X->getType(),
SMax -
C));
992 ConstantInt::get(
X->getType(),
SMax - (
C - 1)));
1001 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
1004 if (
I.getPredicate() ==
I.ICMP_NE)
1013 bool IsAShr = isa<AShrOperator>(
I.getOperand(0));
1025 return getICmp(
I.ICMP_UGT,
A,
1026 ConstantInt::get(
A->getType(), AP2.
logBase2()));
1038 if (IsAShr && AP1 == AP2.
ashr(Shift)) {
1042 return getICmp(
I.ICMP_UGE,
A, ConstantInt::get(
A->getType(), Shift));
1043 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1044 }
else if (AP1 == AP2.
lshr(Shift)) {
1045 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1051 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1060 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
1063 if (
I.getPredicate() ==
I.ICMP_NE)
1074 if (!AP1 && AP2TrailingZeros != 0)
1077 ConstantInt::get(
A->getType(), AP2.
getBitWidth() - AP2TrailingZeros));
1085 if (Shift > 0 && AP2.
shl(Shift) == AP1)
1086 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1090 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1111 Instruction *AddWithCst = cast<Instruction>(
I.getOperand(0));
1119 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1143 if (U == AddWithCst)
1161 I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1190 if (!
I.isEquality())
1221 APInt(XBitWidth, XBitWidth - 1))))
1223 }
else if (isa<BinaryOperator>(Val) &&
1248 return new ICmpInst(Pred,
B, Cmp.getOperand(1));
1250 return new ICmpInst(Pred,
A, Cmp.getOperand(1));
1267 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1279 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1285 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1287 auto *BO0 = cast<OverflowingBinaryOperator>(Cmp.getOperand(0));
1288 if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
1296 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1301 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1317 return new ICmpInst(Pred, Stripped,
1330 const APInt *Mask, *Neg;
1349 return new ICmpInst(Pred, NewAnd, Zero);
1370 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1383 if (
auto *Phi = dyn_cast<PHINode>(Op0))
1384 if (
all_of(Phi->operands(), [](
Value *V) { return isa<Constant>(V); })) {
1386 for (
Value *V : Phi->incoming_values()) {
1395 for (
auto [V, Pred] :
zip(Ops, Phi->blocks()))
1410 Value *
X = Cmp.getOperand(0), *
Y = Cmp.getOperand(1);
1443 if (Cmp.isEquality() || (IsSignBit &&
hasBranchUse(Cmp)))
1448 if (Cmp.hasOneUse() &&
1462 if (!
match(BI->getCondition(),
1468 if (
auto *V = handleDomCond(DomPred, DomC))
1488 Type *SrcTy =
X->getType();
1494 if (shouldChangeType(Trunc->
getType(), SrcTy)) {
1496 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.sext(SrcBits)));
1498 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.zext(SrcBits)));
1501 if (
C.isOne() &&
C.getBitWidth() > 1) {
1506 ConstantInt::get(V->getType(), 1));
1516 auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
1517 return new ICmpInst(NewPred,
Y, ConstantInt::get(SrcTy, DstBits));
1522 return new ICmpInst(Pred,
Y, ConstantInt::get(SrcTy,
C.logBase2()));
1525 if (Cmp.isEquality() && Trunc->
hasOneUse()) {
1528 if (!SrcTy->
isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1532 Constant *WideC = ConstantInt::get(SrcTy,
C.zext(SrcBits));
1541 if ((Known.
Zero | Known.
One).countl_one() >= SrcBits - DstBits) {
1543 APInt NewRHS =
C.zext(SrcBits);
1545 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy, NewRHS));
1557 DstBits == SrcBits - ShAmt) {
1574 bool YIsSExt =
false;
1577 unsigned NoWrapFlags = cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1578 cast<TruncInst>(Cmp.getOperand(1))->getNoWrapKind();
1579 if (Cmp.isSigned()) {
1590 if (
X->getType() !=
Y->getType() &&
1591 (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1593 if (!isDesirableIntType(
X->getType()->getScalarSizeInBits()) &&
1594 isDesirableIntType(
Y->getType()->getScalarSizeInBits())) {
1596 Pred = Cmp.getSwappedPredicate(Pred);
1601 else if (!Cmp.isSigned() &&
1611 isa<SExtInst>(Cmp.getOperand(0)) || isa<SExtInst>(Cmp.getOperand(1));
1615 Type *TruncTy = Cmp.getOperand(0)->getType();
1620 if (isDesirableIntType(TruncBits) &&
1621 !isDesirableIntType(
X->getType()->getScalarSizeInBits()))
1644 bool TrueIfSigned =
false;
1661 if (
Xor->hasOneUse()) {
1663 if (!Cmp.isEquality() && XorC->
isSignMask()) {
1664 Pred = Cmp.getFlippedSignednessPredicate();
1665 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1670 Pred = Cmp.getFlippedSignednessPredicate();
1671 Pred = Cmp.getSwappedPredicate(Pred);
1672 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1679 if (*XorC == ~
C && (
C + 1).isPowerOf2())
1682 if (*XorC ==
C && (
C + 1).isPowerOf2())
1687 if (*XorC == -
C &&
C.isPowerOf2())
1689 ConstantInt::get(
X->getType(), ~
C));
1691 if (*XorC ==
C && (-
C).isPowerOf2())
1693 ConstantInt::get(
X->getType(), ~
C));
1715 const APInt *ShiftC;
1720 Type *XType =
X->getType();
1726 return new ICmpInst(Pred,
Add, ConstantInt::get(XType, Bound));
1735 if (!Shift || !Shift->
isShift())
1743 unsigned ShiftOpcode = Shift->
getOpcode();
1744 bool IsShl = ShiftOpcode == Instruction::Shl;
1747 APInt NewAndCst, NewCmpCst;
1748 bool AnyCmpCstBitsShiftedOut;
1749 if (ShiftOpcode == Instruction::Shl) {
1757 NewCmpCst = C1.
lshr(*C3);
1758 NewAndCst = C2.
lshr(*C3);
1759 AnyCmpCstBitsShiftedOut = NewCmpCst.
shl(*C3) != C1;
1760 }
else if (ShiftOpcode == Instruction::LShr) {
1765 NewCmpCst = C1.
shl(*C3);
1766 NewAndCst = C2.
shl(*C3);
1767 AnyCmpCstBitsShiftedOut = NewCmpCst.
lshr(*C3) != C1;
1773 assert(ShiftOpcode == Instruction::AShr &&
"Unknown shift opcode");
1774 NewCmpCst = C1.
shl(*C3);
1775 NewAndCst = C2.
shl(*C3);
1776 AnyCmpCstBitsShiftedOut = NewCmpCst.
ashr(*C3) != C1;
1777 if (NewAndCst.
ashr(*C3) != C2)
1781 if (AnyCmpCstBitsShiftedOut) {
1791 Shift->
getOperand(0), ConstantInt::get(
And->getType(), NewAndCst));
1792 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1793 ConstantInt::get(
And->getType(), NewCmpCst));
1810 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1825 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.
isZero() &&
1827 return new TruncInst(
And->getOperand(0), Cmp.getType());
1838 ConstantInt::get(
X->getType(), ~*C2));
1843 ConstantInt::get(
X->getType(), -*C2));
1846 if (!
And->hasOneUse())
1849 if (Cmp.isEquality() && C1.
isZero()) {
1867 Constant *NegBOC = ConstantInt::get(
And->getType(), -NewC2);
1869 return new ICmpInst(NewPred,
X, NegBOC);
1887 if (!Cmp.getType()->isVectorTy()) {
1888 Type *WideType = W->getType();
1890 Constant *ZextC1 = ConstantInt::get(WideType, C1.
zext(WideScalarBits));
1891 Constant *ZextC2 = ConstantInt::get(WideType, C2->
zext(WideScalarBits));
1893 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1904 if (!Cmp.isSigned() && C1.
isZero() &&
And->getOperand(0)->hasOneUse() &&
1906 Constant *One = cast<Constant>(
And->getOperand(1));
1911 unsigned UsesRemoved = 0;
1912 if (
And->hasOneUse())
1914 if (
Or->hasOneUse())
1921 if (UsesRemoved >= RequireUsesRemoved) {
1925 One,
Or->getName());
1927 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1941 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1942 Attribute::NoImplicitFloat) &&
1945 Type *FPType = V->getType()->getScalarType();
1946 if (FPType->isIEEELikeFPTy() && (C1.
isZero() || C1 == *C2)) {
1947 APInt ExponentMask =
1949 if (*C2 == ExponentMask) {
1950 unsigned Mask = C1.
isZero()
1984 Constant *MinSignedC = ConstantInt::get(
1988 return new ICmpInst(NewPred,
X, MinSignedC);
1997 if (
auto *C2 = dyn_cast<ConstantInt>(
Y))
1998 if (
auto *LI = dyn_cast<LoadInst>(
X))
1999 if (
auto *
GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
2000 if (
auto *GV = dyn_cast<GlobalVariable>(
GEP->getOperand(0)))
2005 if (!Cmp.isEquality())
2011 if (Cmp.getOperand(1) ==
Y &&
C.isNegatedPowerOf2()) {
2014 return new ICmpInst(NewPred,
X,
SubOne(cast<Constant>(Cmp.getOperand(1))));
2022 X->getType()->isIntOrIntVectorTy(1) && (
C.isZero() ||
C.isOne())) {
2028 return BinaryOperator::CreateAnd(TruncY,
X);
2046 const APInt *Addend, *Msk;
2050 APInt NewComperand = (
C - *Addend) & *Msk;
2053 ConstantInt::get(MaskA->
getType(), NewComperand));
2075 while (!WorkList.
empty()) {
2076 auto MatchOrOperatorArgument = [&](
Value *OrOperatorArgument) {
2079 if (
match(OrOperatorArgument,
2085 if (
match(OrOperatorArgument,
2095 Value *OrOperatorLhs, *OrOperatorRhs;
2097 if (!
match(CurrentValue,
2102 MatchOrOperatorArgument(OrOperatorRhs);
2103 MatchOrOperatorArgument(OrOperatorLhs);
2109 CmpValues.
rbegin()->second);
2111 for (
auto It = CmpValues.
rbegin() + 1; It != CmpValues.
rend(); ++It) {
2113 LhsCmp = Builder.
CreateBinOp(BOpc, LhsCmp, RhsCmp);
2129 ConstantInt::get(V->getType(), 1));
2132 Value *OrOp0 =
Or->getOperand(0), *OrOp1 =
Or->getOperand(1);
2137 cast<PossiblyDisjointInst>(
Or)->isDisjoint()) {
2140 return new ICmpInst(Pred, OrOp0, NewC);
2144 if (
match(OrOp1,
m_APInt(MaskC)) && Cmp.isEquality()) {
2145 if (*MaskC ==
C && (
C + 1).isPowerOf2()) {
2150 return new ICmpInst(Pred, OrOp0, OrOp1);
2157 if (
Or->hasOneUse()) {
2159 Constant *NewC = ConstantInt::get(
Or->getType(),
C ^ (*MaskC));
2171 Constant *NewC = ConstantInt::get(
X->getType(), TrueIfSigned ? 1 : 0);
2199 if (!Cmp.isEquality() || !
C.isZero() || !
Or->hasOneUse())
2231 if (Cmp.isEquality() &&
C.isZero() &&
X ==
Mul->getOperand(1) &&
2232 (
Mul->hasNoUnsignedWrap() ||
Mul->hasNoSignedWrap()))
2254 if (Cmp.isEquality()) {
2256 if (
Mul->hasNoSignedWrap() &&
C.srem(*MulC).isZero()) {
2257 Constant *NewC = ConstantInt::get(MulTy,
C.sdiv(*MulC));
2265 if (
C.urem(*MulC).isZero()) {
2268 if ((*MulC & 1).isOne() ||
Mul->hasNoUnsignedWrap()) {
2269 Constant *NewC = ConstantInt::get(MulTy,
C.udiv(*MulC));
2282 if (
C.isMinSignedValue() && MulC->
isAllOnes())
2288 NewC = ConstantInt::get(
2292 "Unexpected predicate");
2293 NewC = ConstantInt::get(
2298 NewC = ConstantInt::get(
2302 "Unexpected predicate");
2303 NewC = ConstantInt::get(
2308 return NewC ?
new ICmpInst(Pred,
X, NewC) :
nullptr;
2320 unsigned TypeBits =
C.getBitWidth();
2322 if (Cmp.isUnsigned()) {
2342 return new ICmpInst(Pred,
Y, ConstantInt::get(ShiftType, CLog2));
2343 }
else if (Cmp.isSigned() && C2->
isOne()) {
2344 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2365 const APInt *ShiftVal;
2395 const APInt *ShiftAmt;
2401 unsigned TypeBits =
C.getBitWidth();
2402 if (ShiftAmt->
uge(TypeBits))
2414 APInt ShiftedC =
C.ashr(*ShiftAmt);
2415 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2418 C.ashr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2419 APInt ShiftedC =
C.ashr(*ShiftAmt);
2420 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2427 assert(!
C.isMinSignedValue() &&
"Unexpected icmp slt");
2428 APInt ShiftedC = (
C - 1).ashr(*ShiftAmt) + 1;
2429 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2439 APInt ShiftedC =
C.lshr(*ShiftAmt);
2440 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2443 C.lshr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2444 APInt ShiftedC =
C.lshr(*ShiftAmt);
2445 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2452 assert(
C.ugt(0) &&
"ult 0 should have been eliminated");
2453 APInt ShiftedC = (
C - 1).lshr(*ShiftAmt) + 1;
2454 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2458 if (Cmp.isEquality() && Shl->
hasOneUse()) {
2464 Constant *LShrC = ConstantInt::get(ShType,
C.lshr(*ShiftAmt));
2469 bool TrueIfSigned =
false;
2481 if (Cmp.isUnsigned() && Shl->
hasOneUse()) {
2483 if ((
C + 1).isPowerOf2() &&
2491 if (
C.isPowerOf2() &&
2521 Pred, ConstantInt::get(ShType->
getContext(),
C))) {
2522 CmpPred = FlippedStrictness->first;
2523 RHSC = cast<ConstantInt>(FlippedStrictness->second)->getValue();
2530 ConstantInt::get(TruncTy, RHSC.
ashr(*ShiftAmt).
trunc(TypeBits - Amt));
2549 if (Cmp.isEquality() && Shr->
isExact() &&
C.isZero())
2550 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
2552 bool IsAShr = Shr->
getOpcode() == Instruction::AShr;
2553 const APInt *ShiftValC;
2555 if (Cmp.isEquality())
2573 assert(ShiftValC->
uge(
C) &&
"Expected simplify of compare");
2574 assert((IsUGT || !
C.isZero()) &&
"Expected X u< 0 to simplify");
2576 unsigned CmpLZ = IsUGT ?
C.countl_zero() : (
C - 1).
countl_zero();
2584 const APInt *ShiftAmtC;
2590 unsigned TypeBits =
C.getBitWidth();
2592 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2595 bool IsExact = Shr->
isExact();
2603 (
C - 1).isPowerOf2() &&
C.countLeadingZeros() > ShAmtVal) {
2609 APInt ShiftedC = (
C - 1).shl(ShAmtVal) + 1;
2610 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2616 APInt ShiftedC =
C.shl(ShAmtVal);
2617 if (ShiftedC.
ashr(ShAmtVal) ==
C)
2618 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2622 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2623 if (!
C.isMaxSignedValue() && !(
C + 1).shl(ShAmtVal).isMinSignedValue() &&
2624 (ShiftedC + 1).ashr(ShAmtVal) == (
C + 1))
2625 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2631 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2632 if ((ShiftedC + 1).ashr(ShAmtVal) == (
C + 1) ||
2633 (
C + 1).shl(ShAmtVal).isMinSignedValue())
2634 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2641 if (
C.getBitWidth() > 2 &&
C.getNumSignBits() <= ShAmtVal) {
2651 }
else if (!IsAShr) {
2655 APInt ShiftedC =
C.shl(ShAmtVal);
2656 if (ShiftedC.
lshr(ShAmtVal) ==
C)
2657 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2661 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2662 if ((ShiftedC + 1).lshr(ShAmtVal) == (
C + 1))
2663 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2667 if (!Cmp.isEquality())
2675 assert(((IsAShr &&
C.shl(ShAmtVal).ashr(ShAmtVal) ==
C) ||
2676 (!IsAShr &&
C.shl(ShAmtVal).lshr(ShAmtVal) ==
C)) &&
2677 "Expected icmp+shr simplify did not occur.");
2682 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy,
C << ShAmtVal));
2688 ConstantInt::get(ShrTy, (
C + 1).shl(ShAmtVal)));
2691 ConstantInt::get(ShrTy, (
C + 1).shl(ShAmtVal) - 1));
2698 Constant *Mask = ConstantInt::get(ShrTy, Val);
2700 return new ICmpInst(Pred,
And, ConstantInt::get(ShrTy,
C << ShAmtVal));
2717 const APInt *DivisorC;
2724 "ult X, 0 should have been simplified already.");
2730 "srem X, 0 should have been simplified already.");
2731 if (!NormalizedC.
uge(DivisorC->
abs() - 1))
2754 const APInt *DivisorC;
2763 !
C.isStrictlyPositive()))
2769 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2773 return new ICmpInst(Pred,
And, ConstantInt::get(Ty,
C));
2800 assert(*C2 != 0 &&
"udiv 0, X should have been simplified already.");
2805 "icmp ugt X, UINT_MAX should have been simplified already.");
2807 ConstantInt::get(Ty, C2->
udiv(
C + 1)));
2812 assert(
C != 0 &&
"icmp ult X, 0 should have been simplified already.");
2814 ConstantInt::get(Ty, C2->
udiv(
C)));
2828 bool DivIsSigned = Div->
getOpcode() == Instruction::SDiv;
2838 if (Cmp.isEquality() && Div->
hasOneUse() &&
C.isSignBitSet() &&
2839 (!DivIsSigned ||
C.isMinSignedValue())) {
2864 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2883 bool ProdOV = (DivIsSigned ? Prod.
sdiv(*C2) : Prod.
udiv(*C2)) !=
C;
2896 int LoOverflow = 0, HiOverflow = 0;
2897 APInt LoBound, HiBound;
2902 HiOverflow = LoOverflow = ProdOV;
2911 LoBound = -(RangeSize - 1);
2912 HiBound = RangeSize;
2913 }
else if (
C.isStrictlyPositive()) {
2915 HiOverflow = LoOverflow = ProdOV;
2921 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2923 APInt DivNeg = -RangeSize;
2924 LoOverflow =
addWithOverflow(LoBound, HiBound, DivNeg,
true) ? -1 : 0;
2932 LoBound = RangeSize + 1;
2933 HiBound = -RangeSize;
2934 if (HiBound == *C2) {
2938 }
else if (
C.isStrictlyPositive()) {
2941 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2947 LoOverflow = HiOverflow = ProdOV;
2960 if (LoOverflow && HiOverflow)
2964 X, ConstantInt::get(Ty, LoBound));
2967 X, ConstantInt::get(Ty, HiBound));
2971 if (LoOverflow && HiOverflow)
2975 X, ConstantInt::get(Ty, LoBound));
2978 X, ConstantInt::get(Ty, HiBound));
2983 if (LoOverflow == +1)
2985 if (LoOverflow == -1)
2987 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, LoBound));
2990 if (HiOverflow == +1)
2992 if (HiOverflow == -1)
3022 bool HasNSW =
Sub->hasNoSignedWrap();
3023 bool HasNUW =
Sub->hasNoUnsignedWrap();
3025 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
3027 return new ICmpInst(SwappedPred,
Y, ConstantInt::get(Ty, SubResult));
3035 if (Cmp.isEquality() &&
C.isZero() &&
3036 none_of((
Sub->users()), [](
const User *U) { return isa<PHINode>(U); }))
3044 if (!
Sub->hasOneUse())
3047 if (
Sub->hasNoSignedWrap()) {
3071 (*C2 & (
C - 1)) == (
C - 1))
3084 return new ICmpInst(SwappedPred,
Add, ConstantInt::get(Ty, ~
C));
3090 auto FoldConstant = [&](
bool Val) {
3094 cast<VectorType>(Op0->
getType())->getElementCount(), Res);
3098 switch (Table.to_ulong()) {
3100 return FoldConstant(
false);
3130 return FoldConstant(
true);
3145 Cmp.getType() !=
A->getType())
3148 std::bitset<4> Table;
3149 auto ComputeTable = [&](
bool First,
bool Second) -> std::optional<bool> {
3154 if (
auto *CI = dyn_cast_or_null<ConstantInt>(Val))
3157 return std::nullopt;
3160 for (
unsigned I = 0;
I < 4; ++
I) {
3161 bool First = (
I >> 1) & 1;
3162 bool Second =
I & 1;
3163 if (
auto Res = ComputeTable(
First, Second))
3191 unsigned BW =
C.getBitWidth();
3192 std::bitset<4> Table;
3193 auto ComputeTable = [&](
bool Op0Val,
bool Op1Val) {
3196 Res +=
APInt(BW, isa<ZExtInst>(Ext0) ? 1 : -1,
true);
3198 Res +=
APInt(BW, isa<ZExtInst>(Ext1) ? 1 : -1,
true);
3202 Table[0] = ComputeTable(
false,
false);
3203 Table[1] = ComputeTable(
false,
true);
3204 Table[2] = ComputeTable(
true,
false);
3205 Table[3] = ComputeTable(
true,
true);
3220 if ((
Add->hasNoSignedWrap() &&
3222 (
Add->hasNoUnsignedWrap() &&
3226 Cmp.isSigned() ?
C.ssub_ov(*C2, Overflow) :
C.usub_ov(*C2, Overflow);
3232 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, NewC));
3236 C.isNonNegative() && (
C - *C2).isNonNegative() &&
3239 ConstantInt::get(Ty,
C - *C2));
3244 if (Cmp.isSigned()) {
3245 if (
Lower.isSignMask())
3247 if (
Upper.isSignMask())
3250 if (
Lower.isMinValue())
3252 if (
Upper.isMinValue())
3285 if (!
Add->hasOneUse())
3300 ConstantInt::get(Ty,
C * 2));
3315 ConstantInt::get(Ty, ~
C));
3320 Type *NewCmpTy = V->getType();
3322 if (shouldChangeType(Ty, NewCmpTy)) {
3334 ConstantInt::get(NewCmpTy, EquivInt));
3356 Value *EqualVal = SI->getTrueValue();
3357 Value *UnequalVal = SI->getFalseValue();
3380 auto FlippedStrictness =
3382 if (!FlippedStrictness)
3385 "basic correctness failure");
3386 RHS2 = FlippedStrictness->second;
3398 assert(
C &&
"Cmp RHS should be a constant int!");
3404 Value *OrigLHS, *OrigRHS;
3405 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3406 if (Cmp.hasOneUse() &&
3409 assert(C1LessThan && C2Equal && C3GreaterThan);
3412 C1LessThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3414 Cmp.getPredicate());
3416 C3GreaterThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3427 if (TrueWhenLessThan)
3433 if (TrueWhenGreaterThan)
3443 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
3448 Value *Op1 = Cmp.getOperand(1);
3449 Value *BCSrcOp = Bitcast->getOperand(0);
3450 Type *SrcType = Bitcast->getSrcTy();
3451 Type *DstType = Bitcast->getType();
3471 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(), 1));
3498 Type *XType =
X->getType();
3503 if (
auto *XVTy = dyn_cast<VectorType>(XType))
3517 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3518 Attribute::NoImplicitFloat) &&
3519 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3543 if (Cmp.isEquality() &&
C->isAllOnes() && Bitcast->hasOneUse()) {
3544 if (
Value *NotBCSrcOp =
3555 if (Cmp.isEquality() &&
C->isZero() && Bitcast->hasOneUse() &&
3557 if (
auto *VecTy = dyn_cast<FixedVectorType>(
X->getType())) {
3576 auto *VecTy = cast<VectorType>(SrcType);
3577 auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3578 if (
C->isSplat(EltTy->getBitWidth())) {
3586 Value *NewC = ConstantInt::get(EltTy,
C->trunc(EltTy->getBitWidth()));
3587 return new ICmpInst(Pred, Extract, NewC);
3600 if (
auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3604 if (
auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3608 if (
auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3612 if (
auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3616 if (
auto *
II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3623 Value *Cmp0 = Cmp.getOperand(0);
3625 if (
C->isZero() && Cmp.isEquality() && Cmp0->
hasOneUse() &&
3627 m_ExtractValue<0>(m_Intrinsic<Intrinsic::ssub_with_overflow>(
3630 m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
3632 return new ICmpInst(Cmp.getPredicate(),
X,
Y);
3647 if (!Cmp.isEquality())
3652 Constant *
RHS = cast<Constant>(Cmp.getOperand(1));
3656 case Instruction::SRem:
3667 case Instruction::Add: {
3671 if (
Constant *C2 = dyn_cast<Constant>(BOp1)) {
3674 }
else if (
C.isZero()) {
3677 if (
Value *NegVal = dyn_castNegVal(BOp1))
3678 return new ICmpInst(Pred, BOp0, NegVal);
3679 if (
Value *NegVal = dyn_castNegVal(BOp0))
3680 return new ICmpInst(Pred, NegVal, BOp1);
3689 return new ICmpInst(Pred, BOp0, Neg);
3694 case Instruction::Xor:
3695 if (
Constant *BOC = dyn_cast<Constant>(BOp1)) {
3699 }
else if (
C.isZero()) {
3701 return new ICmpInst(Pred, BOp0, BOp1);
3704 case Instruction::Or: {
3725 Cond->getType() == Cmp.getType()) {
3763 case Instruction::UDiv:
3764 case Instruction::SDiv:
3774 return new ICmpInst(Pred, BOp0, BOp1);
3777 Instruction::Mul, BO->
getOpcode() == Instruction::SDiv, BOp1,
3778 Cmp.getOperand(1), BO);
3782 return new ICmpInst(Pred, YC, BOp0);
3786 if (BO->
getOpcode() == Instruction::UDiv &&
C.isZero()) {
3789 return new ICmpInst(NewPred, BOp1, BOp0);
3803 "Non-ctpop intrin in ctpop fold");
3838 Type *Ty =
II->getType();
3842 switch (
II->getIntrinsicID()) {
3843 case Intrinsic::abs:
3846 if (
C.isZero() ||
C.isMinSignedValue())
3847 return new ICmpInst(Pred,
II->getArgOperand(0), ConstantInt::get(Ty,
C));
3850 case Intrinsic::bswap:
3852 return new ICmpInst(Pred,
II->getArgOperand(0),
3853 ConstantInt::get(Ty,
C.byteSwap()));
3855 case Intrinsic::bitreverse:
3857 return new ICmpInst(Pred,
II->getArgOperand(0),
3858 ConstantInt::get(Ty,
C.reverseBits()));
3860 case Intrinsic::ctlz:
3861 case Intrinsic::cttz: {
3864 return new ICmpInst(Pred,
II->getArgOperand(0),
3870 unsigned Num =
C.getLimitedValue(
BitWidth);
3872 bool IsTrailing =
II->getIntrinsicID() == Intrinsic::cttz;
3875 APInt Mask2 = IsTrailing
3879 ConstantInt::get(Ty, Mask2));
3884 case Intrinsic::ctpop: {
3887 bool IsZero =
C.isZero();
3889 return new ICmpInst(Pred,
II->getArgOperand(0),
3896 case Intrinsic::fshl:
3897 case Intrinsic::fshr:
3898 if (
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3899 const APInt *RotAmtC;
3903 return new ICmpInst(Pred,
II->getArgOperand(0),
3904 II->getIntrinsicID() == Intrinsic::fshl
3905 ? ConstantInt::get(Ty,
C.rotr(*RotAmtC))
3906 : ConstantInt::get(Ty,
C.rotl(*RotAmtC)));
3910 case Intrinsic::umax:
3911 case Intrinsic::uadd_sat: {
3914 if (
C.isZero() &&
II->hasOneUse()) {
3921 case Intrinsic::ssub_sat:
3924 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
3926 case Intrinsic::usub_sat: {
3931 return new ICmpInst(NewPred,
II->getArgOperand(0),
II->getArgOperand(1));
3946 assert(Cmp.isEquality());
3949 Value *Op0 = Cmp.getOperand(0);
3950 Value *Op1 = Cmp.getOperand(1);
3951 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3952 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3953 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3956 switch (IIOp0->getIntrinsicID()) {
3957 case Intrinsic::bswap:
3958 case Intrinsic::bitreverse:
3961 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3962 case Intrinsic::fshl:
3963 case Intrinsic::fshr: {
3966 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3968 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3970 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3971 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3977 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3982 Builder.
CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3984 Op0->
getType(), IIOp0->getIntrinsicID(),
3985 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3986 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
4003 if (
auto *
II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
4004 switch (
II->getIntrinsicID()) {
4007 case Intrinsic::fshl:
4008 case Intrinsic::fshr:
4009 if (Cmp.isEquality() &&
II->getArgOperand(0) ==
II->getArgOperand(1)) {
4011 if (
C.isZero() ||
C.isAllOnes())
4012 return new ICmpInst(Pred,
II->getArgOperand(0), Cmp.getOperand(1));
4026 case Instruction::Xor:
4030 case Instruction::And:
4034 case Instruction::Or:
4038 case Instruction::Mul:
4042 case Instruction::Shl:
4046 case Instruction::LShr:
4047 case Instruction::AShr:
4051 case Instruction::SRem:
4055 case Instruction::UDiv:
4059 case Instruction::SDiv:
4063 case Instruction::Sub:
4067 case Instruction::Add:
4091 if (!
II->hasOneUse())
4107 Value *Op0 =
II->getOperand(0);
4108 Value *Op1 =
II->getOperand(1);
4117 switch (
II->getIntrinsicID()) {
4120 "This function only works with usub_sat and uadd_sat for now!");
4121 case Intrinsic::uadd_sat:
4124 case Intrinsic::usub_sat:
4134 II->getBinaryOp(), *COp1,
II->getNoWrapKind());
4141 if (
II->getBinaryOp() == Instruction::Add)
4147 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4149 std::optional<ConstantRange> Combination;
4150 if (CombiningOp == Instruction::BinaryOps::Or)
4162 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4167 ConstantInt::get(Op1->
getType(), EquivInt));
4174 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4179 NewPredicate = Pred;
4183 else if (
C.isAllOnes())
4191 else if (
C.isZero())
4208 if (!
C.isZero() && !
C.isAllOnes())
4219 if (
I->getIntrinsicID() == Intrinsic::scmp)
4233 switch (
II->getIntrinsicID()) {
4236 case Intrinsic::uadd_sat:
4237 case Intrinsic::usub_sat:
4242 case Intrinsic::ctpop: {
4247 case Intrinsic::scmp:
4248 case Intrinsic::ucmp:
4254 if (Cmp.isEquality())
4257 Type *Ty =
II->getType();
4259 switch (
II->getIntrinsicID()) {
4260 case Intrinsic::ctpop: {
4272 case Intrinsic::ctlz: {
4275 unsigned Num =
C.getLimitedValue();
4278 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4283 unsigned Num =
C.getLimitedValue();
4286 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4290 case Intrinsic::cttz: {
4292 if (!
II->hasOneUse())
4312 case Intrinsic::ssub_sat:
4316 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
4320 II->getArgOperand(1));
4324 II->getArgOperand(1));
4336 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
4337 Constant *RHSC = dyn_cast<Constant>(Op1);
4343 case Instruction::IntToPtr:
4352 case Instruction::Load:
4355 dyn_cast<GetElementPtrInst>(LHSI->
getOperand(0)))
4370 auto SimplifyOp = [&](
Value *
Op,
bool SelectCondIsTrue) ->
Value * {
4374 SI->getCondition(), Pred,
Op,
RHS,
DL, SelectCondIsTrue))
4375 return ConstantInt::get(
I.getType(), *Impl);
4380 Value *Op1 = SimplifyOp(SI->getOperand(1),
true);
4382 CI = dyn_cast<ConstantInt>(Op1);
4384 Value *Op2 = SimplifyOp(SI->getOperand(2),
false);
4386 CI = dyn_cast<ConstantInt>(Op2);
4388 auto Simplifies = [&](
Value *
Op,
unsigned Idx) {
4392 (isa<CmpIntrinsic>(SI->getOperand(
Idx)) &&
4403 bool Transform =
false;
4406 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4408 if (SI->hasOneUse())
4411 else if (CI && !CI->
isZero())
4430 unsigned Depth = 0) {
4433 if (V->getType()->getScalarSizeInBits() == 1)
4441 switch (
I->getOpcode()) {
4442 case Instruction::ZExt:
4445 case Instruction::SExt:
4449 case Instruction::And:
4450 case Instruction::Or:
4457 case Instruction::Xor:
4467 case Instruction::Select:
4471 case Instruction::Shl:
4474 case Instruction::LShr:
4477 case Instruction::AShr:
4481 case Instruction::Add:
4487 case Instruction::Sub:
4493 case Instruction::Call: {
4494 if (
auto *
II = dyn_cast<IntrinsicInst>(
I)) {
4495 switch (
II->getIntrinsicID()) {
4498 case Intrinsic::umax:
4499 case Intrinsic::smax:
4500 case Intrinsic::umin:
4501 case Intrinsic::smin:
4506 case Intrinsic::bitreverse:
4596 auto IsLowBitMask = [&]() {
4614 auto Check = [&]() {
4632 auto Check = [&]() {
4651 if (!IsLowBitMask())
4670 const APInt *C0, *C1;
4687 const APInt &MaskedBits = *C0;
4688 assert(MaskedBits != 0 &&
"shift by zero should be folded away already.");
4709 auto *XType =
X->getType();
4710 const unsigned XBitWidth = XType->getScalarSizeInBits();
4712 assert(
BitWidth.ugt(MaskedBits) &&
"shifts should leave some bits untouched");
4743 !
I.getOperand(0)->hasOneUse())
4768 assert(NarrowestTy ==
I.getOperand(0)->getType() &&
4769 "We did not look past any shifts while matching XShift though.");
4770 bool HadTrunc = WidestTy !=
I.getOperand(0)->getType();
4777 auto XShiftOpcode = XShift->
getOpcode();
4778 if (XShiftOpcode == YShift->
getOpcode())
4781 Value *
X, *XShAmt, *
Y, *YShAmt;
4788 if (!isa<Constant>(
X) && !isa<Constant>(
Y)) {
4790 if (!
match(
I.getOperand(0),
4816 unsigned MaximalPossibleTotalShiftAmount =
4819 APInt MaximalRepresentableShiftAmount =
4821 if (MaximalRepresentableShiftAmount.
ult(MaximalPossibleTotalShiftAmount))
4825 auto *NewShAmt = dyn_cast_or_null<Constant>(
4830 if (NewShAmt->getType() != WidestTy) {
4840 if (!
match(NewShAmt,
4842 APInt(WidestBitWidth, WidestBitWidth))))
4847 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4853 ? NewShAmt->getSplatValue()
4856 if (NewShAmtSplat &&
4862 if (
auto *
C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
4866 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4867 if (MaxActiveBits <= 1)
4873 if (
auto *
C = dyn_cast<Constant>(WidestShift->
getOperand(0))) {
4877 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4878 if (MaxActiveBits <= 1)
4881 if (NewShAmtSplat) {
4884 if (AdjNewShAmt.
ule(MinLeadZero))
4898 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4920 if (!
I.isEquality() &&
4930 NeedNegation =
false;
4933 NeedNegation =
true;
4939 if (
I.isEquality() &&
4954 bool MulHadOtherUses =
Mul && !
Mul->hasOneUse();
4955 if (MulHadOtherUses)
4959 Div->
getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4960 : Intrinsic::smul_with_overflow,
4961 X->getType(), {X, Y},
nullptr,
"mul");
4966 if (MulHadOtherUses)
4975 if (MulHadOtherUses)
5001 Type *Ty =
X->getType();
5015 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5077 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5099 Op1, !isa<Constant>(Op1) && !Op1->hasNUsesOrMore(3), &IC.
Builder))
5112 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5128 return new ICmpInst(PredOut, Op0, Op1);
5148 return new ICmpInst(NewPred, Op0, Const);
5160 if (!
C.isPowerOf2())
5173 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5241 return new ICmpInst(NewPred, Op1, Zero);
5250 return new ICmpInst(NewPred, Op0, Zero);
5254 bool NoOp0WrapProblem =
false, NoOp1WrapProblem =
false;
5255 bool Op0HasNUW =
false, Op1HasNUW =
false;
5256 bool Op0HasNSW =
false, Op1HasNSW =
false;
5260 bool &HasNSW,
bool &HasNUW) ->
bool {
5261 if (isa<OverflowingBinaryOperator>(BO)) {
5267 }
else if (BO.
getOpcode() == Instruction::Or) {
5275 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
5279 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5283 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5288 if ((
A == Op1 ||
B == Op1) && NoOp0WrapProblem)
5294 if ((
C == Op0 ||
D == Op0) && NoOp1WrapProblem)
5299 if (
A &&
C && (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D) && NoOp0WrapProblem &&
5307 }
else if (
A ==
D) {
5311 }
else if (
B ==
C) {
5328 bool IsNegative) ->
bool {
5329 const APInt *OffsetC;
5341 if (!
C.isStrictlyPositive())
5362 if (
A && NoOp0WrapProblem &&
5363 ShareCommonDivisor(
A, Op1,
B,
5374 if (
C && NoOp1WrapProblem &&
5375 ShareCommonDivisor(Op0,
C,
D,
5388 if (
A &&
C && NoOp0WrapProblem && NoOp1WrapProblem &&
5390 const APInt *AP1, *AP2;
5398 if (AP1Abs.
uge(AP2Abs)) {
5399 APInt Diff = *AP1 - *AP2;
5402 A, C3,
"", Op0HasNUW && Diff.
ule(*AP1), Op0HasNSW);
5405 APInt Diff = *AP2 - *AP1;
5408 C, C3,
"", Op1HasNUW && Diff.
ule(*AP2), Op1HasNSW);
5427 if (BO0 && BO0->
getOpcode() == Instruction::Sub) {
5431 if (BO1 && BO1->
getOpcode() == Instruction::Sub) {
5437 if (
A == Op1 && NoOp0WrapProblem)
5440 if (
C == Op0 && NoOp1WrapProblem)
5460 if (
B &&
D &&
B ==
D && NoOp0WrapProblem && NoOp1WrapProblem)
5464 if (
A &&
C &&
A ==
C && NoOp0WrapProblem && NoOp1WrapProblem)
5471 if (
Constant *RHSC = dyn_cast<Constant>(Op1))
5472 if (RHSC->isNotMinSignedValue())
5473 return new ICmpInst(
I.getSwappedPredicate(),
X,
5491 if (Op0HasNSW && Op1HasNSW) {
5504 if (GreaterThan &&
match(GreaterThan,
m_One()))
5511 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5523 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5530 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5541 else if (BO1 && BO1->
getOpcode() == Instruction::SRem &&
5571 case Instruction::Add:
5572 case Instruction::Sub:
5573 case Instruction::Xor: {
5580 if (
C->isSignMask()) {
5586 if (BO0->
getOpcode() == Instruction::Xor &&
C->isMaxSignedValue()) {
5588 NewPred =
I.getSwappedPredicate(NewPred);
5594 case Instruction::Mul: {
5595 if (!
I.isEquality())
5603 if (
unsigned TZs =
C->countr_zero()) {
5609 return new ICmpInst(Pred, And1, And2);
5614 case Instruction::UDiv:
5615 case Instruction::LShr:
5620 case Instruction::SDiv:
5626 case Instruction::AShr:
5631 case Instruction::Shl: {
5632 bool NUW = Op0HasNUW && Op1HasNUW;
5633 bool NSW = Op0HasNSW && Op1HasNSW;
5636 if (!NSW &&
I.isSigned())
5700 auto IsCondKnownTrue = [](
Value *Val) -> std::optional<bool> {
5702 return std::nullopt;
5707 return std::nullopt;
5716 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5718 if (!CmpXZ.has_value()) {
5724 if (CmpYZ.has_value())
5748 if (!MinMaxCmpXZ.has_value()) {
5756 if (!MinMaxCmpXZ.has_value())
5772 return FoldIntoCmpYZ();
5799 return FoldIntoCmpYZ();
5808 return FoldIntoCmpYZ();
5830 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5834 if (
I.isEquality()) {
5869 Type *Ty =
A->getType();
5872 ConstantInt::get(Ty, 2))
5874 ConstantInt::get(Ty, 1));
5881using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5883 bool AllowRecursion) {
5889 case Instruction::Add:
5890 Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(1));
5891 Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(0));
5893 case Instruction::Sub:
5894 Offsets.emplace_back(Instruction::Add, Inst->
getOperand(1));
5896 case Instruction::Xor:
5897 Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(1));
5898 Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(0));
5900 case Instruction::Select:
5901 if (AllowRecursion) {
5918 return {OffsetKind::Invalid,
nullptr,
nullptr,
nullptr};
5921 return {OffsetKind::Value, V,
nullptr,
nullptr};
5924 return {OffsetKind::Select,
Cond, TrueV, FalseV};
5926 bool isValid()
const {
return Kind != OffsetKind::Invalid; }
5929 case OffsetKind::Invalid:
5931 case OffsetKind::Value:
5933 case OffsetKind::Select:
5946 assert(
I.isEquality() &&
"Expected an equality icmp");
5947 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5958 if (!Simplified || Simplified == V)
5967 auto ApplyOffset = [&](
Value *V,
unsigned BinOpc,
5969 if (
auto *Sel = dyn_cast<SelectInst>(V)) {
5970 if (!Sel->hasOneUse())
5972 Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc,
RHS);
5975 Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc,
RHS);
5980 if (
Value *Simplified = ApplyOffsetImpl(V, BinOpc,
RHS))
5985 for (
auto [BinOp,
RHS] : OffsetOps) {
5986 auto BinOpc =
static_cast<unsigned>(BinOp);
5988 auto Op0Result = ApplyOffset(Op0, BinOpc,
RHS);
5989 if (!Op0Result.isValid())
5991 auto Op1Result = ApplyOffset(Op1, BinOpc,
RHS);
5992 if (!Op1Result.isValid())
5995 Value *NewLHS = Op0Result.materialize(Builder);
5996 Value *NewRHS = Op1Result.materialize(Builder);
5997 return new ICmpInst(
I.getPredicate(), NewLHS, NewRHS);
6004 if (!
I.isEquality())
6007 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6011 if (
A == Op1 ||
B == Op1) {
6012 Value *OtherVal =
A == Op1 ?
B :
A;
6040 Value *OtherVal =
A == Op0 ?
B :
A;
6047 Value *
X =
nullptr, *
Y =
nullptr, *Z =
nullptr;
6053 }
else if (
A ==
D) {
6057 }
else if (
B ==
C) {
6061 }
else if (
B ==
D) {
6071 const APInt *C0, *C1;
6073 (*C0 ^ *C1).isNegatedPowerOf2();
6079 int(Op0->
hasOneUse()) + int(Op1->hasOneUse()) +
6081 if (XorIsNegP2 || UseCnt >= 2) {
6104 (Op0->
hasOneUse() || Op1->hasOneUse())) {
6109 MaskC->
countr_one() ==
A->getType()->getScalarSizeInBits())
6115 const APInt *AP1, *AP2;
6124 if (ShAmt < TypeBits && ShAmt != 0) {
6129 return new ICmpInst(NewPred,
Xor, ConstantInt::get(
A->getType(), CmpVal));
6139 if (ShAmt < TypeBits && ShAmt != 0) {
6157 unsigned ASize = cast<IntegerType>(
A->getType())->getPrimitiveSizeInBits();
6159 if (ShAmt < ASize) {
6182 A->getType()->getScalarSizeInBits() ==
BitWidth * 2 &&
6183 (
I.getOperand(0)->hasOneUse() ||
I.getOperand(1)->hasOneUse())) {
6188 Add, ConstantInt::get(
A->getType(),
C.shl(1)));
6211 m_OneUse(m_Intrinsic<Intrinsic::fshr>(
6230 std::optional<bool> IsZero = std::nullopt;
6272 Constant *
C = ConstantInt::get(Res->X->getType(), Res->C);
6276 unsigned SrcBits =
X->getType()->getScalarSizeInBits();
6277 if (
auto *
II = dyn_cast<IntrinsicInst>(
X)) {
6278 if (
II->getIntrinsicID() == Intrinsic::cttz ||
6279 II->getIntrinsicID() == Intrinsic::ctlz) {
6280 unsigned MaxRet = SrcBits;
6300 assert(isa<CastInst>(ICmp.
getOperand(0)) &&
"Expected cast for operand 0");
6301 auto *CastOp0 = cast<CastInst>(ICmp.
getOperand(0));
6306 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6307 bool IsSignedCmp = ICmp.
isSigned();
6312 bool IsZext0 = isa<ZExtInst>(ICmp.
getOperand(0));
6313 bool IsZext1 = isa<ZExtInst>(ICmp.
getOperand(1));
6315 if (IsZext0 != IsZext1) {
6320 if (ICmp.
isEquality() &&
X->getType()->isIntOrIntVectorTy(1) &&
6321 Y->getType()->isIntOrIntVectorTy(1))
6328 auto *NonNegInst0 = dyn_cast<PossiblyNonNegInst>(ICmp.
getOperand(0));
6329 auto *NonNegInst1 = dyn_cast<PossiblyNonNegInst>(ICmp.
getOperand(1));
6331 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6332 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6334 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6341 Type *XTy =
X->getType(), *YTy =
Y->getType();
6348 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6364 if (IsSignedCmp && IsSignedExt)
6377 Type *SrcTy = CastOp0->getSrcTy();
6385 if (IsSignedExt && IsSignedCmp)
6397 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(
C))
6416 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(0));
6417 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(1));
6418 if (SimplifiedOp0 || SimplifiedOp1)
6420 SimplifiedOp0 ? SimplifiedOp0 : ICmp.
getOperand(0),
6421 SimplifiedOp1 ? SimplifiedOp1 : ICmp.
getOperand(1));
6423 auto *CastOp0 = dyn_cast<CastInst>(ICmp.
getOperand(0));
6429 Value *Op0Src = CastOp0->getOperand(0);
6430 Type *SrcTy = CastOp0->getSrcTy();
6431 Type *DestTy = CastOp0->getDestTy();
6435 auto CompatibleSizes = [&](
Type *PtrTy,
Type *IntTy) {
6436 if (isa<VectorType>(PtrTy)) {
6437 PtrTy = cast<VectorType>(PtrTy)->getElementType();
6438 IntTy = cast<VectorType>(IntTy)->getElementType();
6442 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6443 CompatibleSizes(SrcTy, DestTy)) {
6444 Value *NewOp1 =
nullptr;
6445 if (
auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.
getOperand(1))) {
6446 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6448 NewOp1 = PtrToIntOp1->getOperand(0);
6449 }
else if (
auto *RHSC = dyn_cast<Constant>(ICmp.
getOperand(1))) {
6458 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6459 CompatibleSizes(DestTy, SrcTy)) {
6460 Value *NewOp1 =
nullptr;
6461 if (
auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.
getOperand(1))) {
6462 Value *IntSrc = IntToPtrOp1->getOperand(0);
6464 NewOp1 = IntToPtrOp1->getOperand(0);
6465 }
else if (
auto *RHSC = dyn_cast<Constant>(ICmp.
getOperand(1))) {
6484 case Instruction::Add:
6485 case Instruction::Sub:
6487 case Instruction::Mul:
6500 case Instruction::Add:
6505 case Instruction::Sub:
6510 case Instruction::Mul:
6519 bool IsSigned,
Value *LHS,
6523 if (OrigI.
isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
6533 if (
auto *LHSTy = dyn_cast<VectorType>(
LHS->
getType()))
6548 Result->takeName(&OrigI);
6553 Result->takeName(&OrigI);
6555 if (
auto *Inst = dyn_cast<Instruction>(Result)) {
6557 Inst->setHasNoSignedWrap();
6559 Inst->setHasNoUnsignedWrap();
6582 const APInt *OtherVal,
6586 if (!isa<IntegerType>(MulVal->
getType()))
6589 auto *MulInstr = dyn_cast<Instruction>(MulVal);
6592 assert(MulInstr->getOpcode() == Instruction::Mul);
6594 auto *
LHS = cast<ZExtInst>(MulInstr->getOperand(0)),
6595 *
RHS = cast<ZExtInst>(MulInstr->getOperand(1));
6596 assert(
LHS->getOpcode() == Instruction::ZExt);
6597 assert(
RHS->getOpcode() == Instruction::ZExt);
6601 Type *TyA =
A->getType(), *TyB =
B->getType();
6603 WidthB = TyB->getPrimitiveSizeInBits();
6606 if (WidthB > WidthA) {
6621 if (
TruncInst *TI = dyn_cast<TruncInst>(U)) {
6624 if (TruncWidth > MulWidth)
6628 if (BO->getOpcode() != Instruction::And)
6630 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6631 const APInt &CVal = CI->getValue();
6647 switch (
I.getPredicate()) {
6654 if (MaxVal.
eq(*OtherVal))
6664 if (MaxVal.
eq(*OtherVal))
6678 if (WidthA < MulWidth)
6680 if (WidthB < MulWidth)
6684 {MulA, MulB},
nullptr,
"umul");
6695 if (
TruncInst *TI = dyn_cast<TruncInst>(U)) {
6696 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6701 assert(BO->getOpcode() == Instruction::And);
6703 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
6739 switch (
I.getPredicate()) {
6770 assert(DI && UI &&
"Instruction not defined\n");
6781 auto *Usr = cast<Instruction>(U);
6782 if (Usr != UI && !
DT.
dominates(DB, Usr->getParent()))
6793 auto *BI = dyn_cast_or_null<BranchInst>(BB->
getTerminator());
6794 if (!BI || BI->getNumSuccessors() != 2)
6796 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
6797 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
6844 const unsigned SIOpd) {
6845 assert((SIOpd == 1 || SIOpd == 2) &&
"Invalid select operand!");
6847 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
6861 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
6871 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6899 if (!isa<Constant>(Op0) && Op0Known.
isConstant())
6902 if (!isa<Constant>(Op1) && Op1Known.
isConstant())
6931 if (!Cmp.hasOneUse())
6940 if (!isMinMaxCmp(
I)) {
6945 if (Op1Min == Op0Max)
6950 if (*CmpC == Op0Min + 1)
6952 ConstantInt::get(Op1->getType(), *CmpC - 1));
6962 if (Op1Max == Op0Min)
6967 if (*CmpC == Op0Max - 1)
6969 ConstantInt::get(Op1->getType(), *CmpC + 1));
6979 if (Op1Min == Op0Max)
6983 if (*CmpC == Op0Min + 1)
6985 ConstantInt::get(Op1->getType(), *CmpC - 1));
6990 if (Op1Max == Op0Min)
6994 if (*CmpC == Op0Max - 1)
6996 ConstantInt::get(Op1->getType(), *CmpC + 1));
7013 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
7019 *LHSC != Op0KnownZeroInverted)
7025 Type *XTy =
X->getType();
7027 APInt C2 = Op0KnownZeroInverted;
7028 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
7034 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7044 (Op0Known & Op1Known) == Op0Known)
7050 if (Op1Min == Op0Max)
7054 if (Op1Max == Op0Min)
7058 if (Op1Min == Op0Max)
7062 if (Op1Max == Op0Min)
7070 if ((
I.isSigned() || (
I.isUnsigned() && !
I.hasSameSign())) &&
7073 I.setPredicate(
I.getUnsignedPredicate());
7108 bool IsSExt = ExtI->
getOpcode() == Instruction::SExt;
7110 auto CreateRangeCheck = [&] {
7125 }
else if (!IsSExt || HasOneUse) {
7130 return CreateRangeCheck();
7132 }
else if (IsSExt ?
C->isAllOnes() :
C->isOne()) {
7140 }
else if (!IsSExt || HasOneUse) {
7145 return CreateRangeCheck();
7159 Instruction::ICmp, Pred1,
X,
7178 Value *Op0 =
I.getOperand(0);
7179 Value *Op1 =
I.getOperand(1);
7180 auto *Op1C = dyn_cast<Constant>(Op1);
7185 if (!FlippedStrictness)
7188 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7206 I.setName(
I.getName() +
".not");
7217 Value *
A =
I.getOperand(0), *
B =
I.getOperand(1);
7218 assert(
A->getType()->isIntOrIntVectorTy(1) &&
"Bools only");
7224 switch (
I.getPredicate()) {
7233 switch (
I.getPredicate()) {
7243 switch (
I.getPredicate()) {
7252 return BinaryOperator::CreateXor(
A,
B);
7260 return BinaryOperator::CreateAnd(Builder.
CreateNot(
A),
B);
7268 return BinaryOperator::CreateAnd(Builder.
CreateNot(
B),
A);
7276 return BinaryOperator::CreateOr(Builder.
CreateNot(
A),
B);
7284 return BinaryOperator::CreateOr(Builder.
CreateNot(
B),
A);
7340 Value *
LHS = Cmp.getOperand(0), *
RHS = Cmp.getOperand(1);
7345 if (
auto *
I = dyn_cast<Instruction>(V))
7346 I->copyIRFlags(&Cmp);
7347 Module *M = Cmp.getModule();
7349 M, Intrinsic::vector_reverse, V->getType());
7357 return createCmpReverse(Pred, V1, V2);
7361 return createCmpReverse(Pred, V1,
RHS);
7365 return createCmpReverse(Pred,
LHS, V2);
7390 Constant *ScalarC =
C->getSplatValue(
true);
7409 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7413 auto UAddOvResultPat = m_ExtractValue<0>(
7415 if (
match(Op0, UAddOvResultPat) &&
7424 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
7426 (Op0 ==
A || Op0 ==
B))
7428 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
7436 if (!
I.getOperand(0)->getType()->isPointerTy() ||
7438 I.getParent()->getParent(),
7439 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7445 Op->isLaunderOrStripInvariantGroup()) {
7447 Op->getOperand(0),
I.getOperand(1));
7459 if (
I.getType()->isVectorTy())
7481 auto *LHSTy = dyn_cast<FixedVectorType>(
LHS->
getType());
7482 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7485 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7487 if (!
DL.isLegalInteger(NumBits))
7491 auto *ScalarTy = Builder.
getIntNTy(NumBits);
7506 if (
auto *
GEP = dyn_cast<GEPOperator>(Op0))
7510 if (
auto *SI = dyn_cast<SelectInst>(Op0))
7514 if (
auto *
MinMax = dyn_cast<MinMaxIntrinsic>(Op0))
7545 bool IsIntMinPosion =
C->isAllOnesValue();
7557 CxtI, IsIntMinPosion
7560 X, ConstantInt::get(
X->getType(),
SMin + 1)));
7566 CxtI, IsIntMinPosion
7569 X, ConstantInt::get(
X->getType(),
SMin)));
7582 auto CheckUGT1 = [](
const APInt &Divisor) {
return Divisor.ugt(1); };
7597 auto CheckNE0 = [](
const APInt &Shift) {
return !Shift.isZero(); };
7615 bool Changed =
false;
7617 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7624 if (Op0Cplxity < Op1Cplxity) {
7639 if (
Value *V = dyn_castNegVal(SelectTrue)) {
7640 if (V == SelectFalse)
7642 }
else if (
Value *V = dyn_castNegVal(SelectFalse)) {
7643 if (V == SelectTrue)
7687 if (
SelectInst *SI = dyn_cast<SelectInst>(
I.user_back())) {
7736 if (
auto *PN = dyn_cast<PHINode>(Op0))
7739 if (
auto *PN = dyn_cast<PHINode>(Op1))
7752 if (
I.isCommutative()) {
7753 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
7777 (Op0->
hasOneUse() || Op1->hasOneUse())) {
7799 auto *I0 = cast<OverflowingBinaryOperator>(Op0);
7800 auto *I1 = cast<OverflowingBinaryOperator>(Op1);
7801 bool I0NUW = I0->hasNoUnsignedWrap();
7802 bool I1NUW = I1->hasNoUnsignedWrap();
7803 bool I0NSW = I0->hasNoSignedWrap();
7804 bool I1NSW = I1->hasNoSignedWrap();
7808 ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7810 ConstantInt::get(Op0->
getType(), 0));
7817 assert(Op1->getType()->isPointerTy() &&
7818 "Comparing pointer with non-pointer?");
7847 bool ConsumesOp0, ConsumesOp1;
7850 (ConsumesOp0 || ConsumesOp1)) {
7853 assert(InvOp0 && InvOp1 &&
7854 "Mismatch between isFreeToInvert and getFreelyInverted");
7855 return new ICmpInst(
I.getSwappedPredicate(), InvOp0, InvOp1);
7862 isa<IntegerType>(
X->getType())) {
7867 if (AddI->
getOpcode() == Instruction::Add &&
7868 OptimizeOverflowCheck(Instruction::Add,
false,
X,
Y, *AddI,
7869 Result, Overflow)) {
7887 if ((
I.isUnsigned() ||
I.isEquality()) &&
7890 Y->getType()->getScalarSizeInBits() == 1 &&
7891 (Op0->
hasOneUse() || Op1->hasOneUse())) {
7898 unsigned ShiftOpc = ShiftI->
getOpcode();
7899 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
7900 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
7929 if (
auto *EVI = dyn_cast<ExtractValueInst>(Op0))
7930 if (
auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
7931 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
7938 if (
I.getType()->isVectorTy())
7950 const APInt *C1, *C2;
7957 Type *InputTy =
A->getType();
7964 TruncC1.
setBit(InputBitWidth - 1);
7968 ConstantInt::get(InputTy, C2->
trunc(InputBitWidth)));
7974 return Changed ? &
I :
nullptr;
7988 if (MantissaWidth == -1)
7993 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
7995 if (
I.isEquality()) {
7997 bool IsExact =
false;
7998 APSInt RHSCvt(IntWidth, LHSUnsigned);
8007 if (*
RHS != RHSRoundInt) {
8027 if ((
int)IntWidth > MantissaWidth) {
8032 if (MaxExponent < (
int)IntWidth - !LHSUnsigned)
8038 if (MantissaWidth <= Exp && Exp <= (
int)IntWidth - !LHSUnsigned)
8047 assert(!
RHS->isNaN() &&
"NaN comparison not already folded!");
8050 switch (
I.getPredicate()) {
8141 APSInt RHSInt(IntWidth, LHSUnsigned);
8144 if (!
RHS->isZero()) {
8159 if (
RHS->isNegative())
8165 if (
RHS->isNegative())
8171 if (
RHS->isNegative())
8178 if (!
RHS->isNegative())
8184 if (
RHS->isNegative())
8190 if (
RHS->isNegative())
8196 if (
RHS->isNegative())
8203 if (!
RHS->isNegative())
8257 if (
C->isNegative())
8258 Pred =
I.getSwappedPredicate();
8274 bool RoundDown =
false;
8299 auto NextValue = [](
const APFloat &
Value,
bool RoundDown) {
8301 NextValue.
next(RoundDown);
8305 APFloat NextCValue = NextValue(*CValue, RoundDown);
8311 APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
8312 APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
8319 APFloat PrevCValue = NextValue(*CValue, !RoundDown);
8320 APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
8322 ExtNextCValue = ExtCValue + Bias;
8329 C.getType()->getScalarType()->getFltSemantics();
8332 APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
8333 if (MidValue != *CValue)
8334 ExtMidValue.
next(!RoundDown);
8342 if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())
8346 APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
8347 if (ConvertFltSema(NextExtMidValue, SrcFltSema).
isFinite())
8352 ConstantFP::get(DestType, ExtMidValue),
"", &
I);
8365 if (!
C->isPosZero()) {
8366 if (!
C->isSmallestNormalized())
8379 switch (
I.getPredicate()) {
8405 switch (
I.getPredicate()) {
8430 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8435 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8449 return replacePredAndOp0(&
I,
I.getPredicate(),
X);
8471 if (!cast<Instruction>(
I.getOperand(0))->hasNoInfs())
8472 I.setHasNoInfs(
false);
8474 switch (
I.getPredicate()) {
8519 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8524 Pred =
I.getSwappedPredicate();
8533 return new FCmpInst(Pred, Op0, Zero,
"", &
I);
8569 I.getFunction()->getDenormalMode(
8591 if (!FloorX && !CeilX) {
8595 Pred =
I.getSwappedPredicate();
8652 bool Changed =
false;
8663 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8670 assert(OpType == Op1->getType() &&
"fcmp with different-typed operands?");
8695 if (
I.isCommutative()) {
8696 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
8718 return new FCmpInst(
I.getSwappedPredicate(),
X,
Y,
"", &
I);
8731 if (
SelectInst *SI = dyn_cast<SelectInst>(
I.user_back())) {
8800 Type *IntTy =
X->getType();
8812 case Instruction::Select:
8820 case Instruction::FSub:
8825 case Instruction::PHI:
8829 case Instruction::SIToFP:
8830 case Instruction::UIToFP:
8834 case Instruction::FDiv:
8838 case Instruction::Load:
8839 if (
auto *
GEP = dyn_cast<GetElementPtrInst>(LHSI->
getOperand(0)))
8840 if (
auto *GV = dyn_cast<GlobalVariable>(
GEP->getOperand(0)))
8842 cast<LoadInst>(LHSI),
GEP, GV,
I))
8845 case Instruction::FPTrunc:
8866 return new FCmpInst(
I.getSwappedPredicate(),
X, NegC,
"", &
I);
8885 X->getType()->getScalarType()->getFltSemantics();
8921 Constant *NewC = ConstantFP::get(
X->getType(), TruncC);
8935 if (
auto *VecTy = dyn_cast<VectorType>(OpType))
8947 Value *CanonLHS =
nullptr, *CanonRHS =
nullptr;
8948 match(Op0, m_Intrinsic<Intrinsic::canonicalize>(
m_Value(CanonLHS)));
8949 match(Op1, m_Intrinsic<Intrinsic::canonicalize>(
m_Value(CanonRHS)));
8952 if (CanonLHS == Op1)
8953 return new FCmpInst(Pred, Op1, Op1,
"", &
I);
8956 if (CanonRHS == Op0)
8957 return new FCmpInst(Pred, Op0, Op0,
"", &
I);
8960 if (CanonLHS && CanonRHS)
8961 return new FCmpInst(Pred, CanonLHS, CanonRHS,
"", &
I);
8964 if (
I.getType()->isVectorTy())
8968 return Changed ? &
I :
nullptr;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
mir Rename Register Operands
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus next(bool nextDown)
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
opStatus roundToIntegral(roundingMode RM)
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool eq(const APInt &RHS) const
Equality comparison.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void flipAllBits()
Toggle every bit to its opposite value.
unsigned countl_one() const
Count the number of leading one bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Class to represent array types.
LLVM Basic Block Representation.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
bool isStrictPredicate() const
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
bool isIntPredicate() const
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
CmpInst::Predicate dropSameSign() const
Drops samesign information.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers,...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Type * getSmallestLegalIntType(LLVMContext &C, unsigned Width=0) const
Returns the smallest integer type with size at least as big as Width bits.
iterator find(const_arg_type_t< KeyT > Val)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
LLVM_ABI Type * getSourceElementType() const
Value * getPointerOperand()
GEPNoWrapFlags getNoWrapFlags() const
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Common base class shared among various IRBuilders.
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Value * CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
LLVM_ABI Value * createIsFPClass(Value *FPNum, unsigned Test)
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth...
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y ((x * y) ?/ x) != y to @llvm.
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns b...
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than,...
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global v...
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known t...
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit te...
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq (num + mask) & ~mask, num to icmp eq (and num, mask), 0 Where mask is a low bit mask.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1) ((X s>> ShiftC) ^ X) u> (C - 1) -...
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ? InstCombine's freelyInvertA...
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool contains(const key_type &key) const
Check if the SetVector contains the given key.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Class to represent struct types.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
bool isArrayTy() const
True if this is an instance of ArrayType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI const fltSemantics & getFltSemantics() const
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr or ashr).
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
This is an optimization pass for GlobalISel generic memory operations.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
SelectPatternFlavor
Specific patterns of select instructions we can match.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be positive (i.e.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult value(Value *V)
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV)
static OffsetResult invalid()
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isConstant() const
Returns true if we know the value of all bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutDomCondCache() const
A MapVector that performs no allocations if smaller than a certain size.
Capture information for a specific Use.