33using namespace PatternMatch;
35#define DEBUG_TYPE "instcombine"
43 bool IsSigned =
false) {
46 Result = In1.
sadd_ov(In2, Overflow);
48 Result = In1.
uadd_ov(In2, Overflow);
56 bool IsSigned =
false) {
59 Result = In1.
ssub_ov(In2, Overflow);
61 Result = In1.
usub_ov(In2, Overflow);
69 for (
auto *U :
I.users())
70 if (isa<BranchInst>(U))
80 if (!ICmpInst::isSigned(Pred))
87 if (Pred == ICmpInst::ICMP_SLT) {
88 Pred = ICmpInst::ICMP_SLE;
91 }
else if (
C.isAllOnes()) {
92 if (Pred == ICmpInst::ICMP_SGT) {
93 Pred = ICmpInst::ICMP_SGE;
118 if (!isa<ConstantArray>(
Init) && !isa<ConstantDataArray>(
Init))
121 uint64_t ArrayElementCount =
Init->getType()->getArrayNumElements();
130 if (
GEP->getNumOperands() < 3 || !isa<ConstantInt>(
GEP->getOperand(1)) ||
131 !cast<ConstantInt>(
GEP->getOperand(1))->isZero() ||
132 isa<Constant>(
GEP->getOperand(2)))
140 Type *EltTy =
Init->getType()->getArrayElementType();
141 for (
unsigned i = 3, e =
GEP->getNumOperands(); i != e; ++i) {
147 if ((
unsigned)IdxVal != IdxVal)
150 if (
StructType *STy = dyn_cast<StructType>(EltTy))
151 EltTy = STy->getElementType(IdxVal);
152 else if (
ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
153 if (IdxVal >= ATy->getNumElements())
155 EltTy = ATy->getElementType();
163 enum { Overdefined = -3, Undefined = -2 };
172 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
176 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
184 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
193 for (
unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
199 if (!LaterIndices.
empty()) {
214 CompareRHS,
DL, &
TLI);
219 if (isa<UndefValue>(
C)) {
222 if (TrueRangeEnd == (
int)i - 1)
224 if (FalseRangeEnd == (
int)i - 1)
231 if (!isa<ConstantInt>(
C))
236 bool IsTrueForElt = !cast<ConstantInt>(
C)->isZero();
241 if (FirstTrueElement == Undefined)
242 FirstTrueElement = TrueRangeEnd = i;
245 if (SecondTrueElement == Undefined)
246 SecondTrueElement = i;
248 SecondTrueElement = Overdefined;
251 if (TrueRangeEnd == (
int)i - 1)
254 TrueRangeEnd = Overdefined;
258 if (FirstFalseElement == Undefined)
259 FirstFalseElement = FalseRangeEnd = i;
262 if (SecondFalseElement == Undefined)
263 SecondFalseElement = i;
265 SecondFalseElement = Overdefined;
268 if (FalseRangeEnd == (
int)i - 1)
271 FalseRangeEnd = Overdefined;
276 if (i < 64 && IsTrueForElt)
277 MagicBitvector |= 1ULL << i;
282 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
283 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
284 FalseRangeEnd == Overdefined)
295 if (!
GEP->isInBounds()) {
298 if (
Idx->getType()->getPrimitiveSizeInBits().getFixedValue() > OffsetSize)
309 unsigned ElementSize =
322 if (SecondTrueElement != Overdefined) {
325 if (FirstTrueElement == Undefined)
328 Value *FirstTrueIdx = ConstantInt::get(
Idx->getType(), FirstTrueElement);
331 if (SecondTrueElement == Undefined)
336 Value *SecondTrueIdx = ConstantInt::get(
Idx->getType(), SecondTrueElement);
338 return BinaryOperator::CreateOr(C1, C2);
343 if (SecondFalseElement != Overdefined) {
346 if (FirstFalseElement == Undefined)
349 Value *FirstFalseIdx = ConstantInt::get(
Idx->getType(), FirstFalseElement);
352 if (SecondFalseElement == Undefined)
357 Value *SecondFalseIdx =
358 ConstantInt::get(
Idx->getType(), SecondFalseElement);
360 return BinaryOperator::CreateAnd(C1, C2);
365 if (TrueRangeEnd != Overdefined) {
366 assert(TrueRangeEnd != FirstTrueElement &&
"Should emit single compare");
370 if (FirstTrueElement) {
371 Value *Offs = ConstantInt::get(
Idx->getType(), -FirstTrueElement);
376 ConstantInt::get(
Idx->getType(), TrueRangeEnd - FirstTrueElement + 1);
381 if (FalseRangeEnd != Overdefined) {
382 assert(FalseRangeEnd != FirstFalseElement &&
"Should emit single compare");
385 if (FirstFalseElement) {
386 Value *Offs = ConstantInt::get(
Idx->getType(), -FirstFalseElement);
391 ConstantInt::get(
Idx->getType(), FalseRangeEnd - FirstFalseElement);
404 if (ArrayElementCount <= Idx->
getType()->getIntegerBitWidth())
438 while (!WorkList.
empty()) {
441 while (!WorkList.
empty()) {
442 if (Explored.
size() >= 100)
452 if (!isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
457 if (
auto *
GEP = dyn_cast<GEPOperator>(V)) {
459 auto IsNonConst = [](
Value *V) {
return !isa<ConstantInt>(V); };
460 if (!
GEP->isInBounds() ||
count_if(
GEP->indices(), IsNonConst) > 1)
468 if (WorkList.
back() == V) {
474 if (
auto *PN = dyn_cast<PHINode>(V)) {
476 if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
484 for (
auto *PN : PHIs)
485 for (
Value *
Op : PN->incoming_values())
493 for (
Value *Val : Explored) {
496 auto *
PHI = dyn_cast<PHINode>(
Use);
497 auto *Inst = dyn_cast<Instruction>(Val);
499 if (Inst ==
Base || Inst ==
PHI || !Inst || !
PHI ||
503 if (
PHI->getParent() == Inst->getParent())
514 if (
auto *
PHI = dyn_cast<PHINode>(V)) {
519 if (
auto *
I = dyn_cast<Instruction>(V)) {
521 I = &*std::next(
I->getIterator());
525 if (
auto *
A = dyn_cast<Argument>(V)) {
527 BasicBlock &Entry =
A->getParent()->getEntryBlock();
533 assert(isa<Constant>(V) &&
"Setting insertion point for unknown value!");
550 Base->getContext(),
DL.getIndexTypeSizeInBits(Start->getType()));
556 for (
Value *Val : Explored) {
561 if (
auto *
PHI = dyn_cast<PHINode>(Val))
564 PHI->getName() +
".idx",
PHI->getIterator());
569 for (
Value *Val : Explored) {
573 if (
auto *
GEP = dyn_cast<GEPOperator>(Val)) {
577 if (isa<ConstantInt>(
Op) && cast<ConstantInt>(
Op)->
isZero())
578 NewInsts[
GEP] = OffsetV;
581 Op, OffsetV,
GEP->getOperand(0)->getName() +
".add",
586 if (isa<PHINode>(Val))
593 for (
Value *Val : Explored) {
598 if (
auto *
PHI = dyn_cast<PHINode>(Val)) {
600 for (
unsigned I = 0, E =
PHI->getNumIncomingValues();
I < E; ++
I) {
601 Value *NewIncoming =
PHI->getIncomingValue(
I);
603 auto It = NewInsts.
find(NewIncoming);
604 if (It != NewInsts.
end())
605 NewIncoming = It->second;
612 for (
Value *Val : Explored) {
619 Val->getName() +
".ptr", NW);
626 return NewInsts[Start];
688 if (!isa<GetElementPtrInst>(
RHS))
720 isa<Constant>(
RHS) && cast<Constant>(
RHS)->isNullValue() &&
742 auto EC = cast<VectorType>(GEPLHS->
getType())->getElementCount();
747 cast<Constant>(
RHS),
Base->getType()));
753 if (PtrBase != GEPRHS->getOperand(0)) {
754 bool IndicesTheSame =
757 GEPRHS->getPointerOperand()->getType() &&
761 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
762 IndicesTheSame =
false;
768 if (IndicesTheSame &&
776 if (GEPLHS->
isInBounds() && GEPRHS->isInBounds() &&
778 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
782 Value *LOffset = EmitGEPOffset(GEPLHS);
783 Value *ROffset = EmitGEPOffset(GEPRHS);
790 if (LHSIndexTy != RHSIndexTy) {
812 unsigned NumDifferences = 0;
813 unsigned DiffOperand = 0;
814 for (
unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
815 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
817 Type *RHSType = GEPRHS->getOperand(i)->getType();
828 if (NumDifferences++)
833 if (NumDifferences == 0)
841 Value *RHSV = GEPRHS->getOperand(DiffOperand);
842 return NewICmp(NW, LHSV, RHSV);
848 Value *L = EmitGEPOffset(GEPLHS,
true);
849 Value *R = EmitGEPOffset(GEPRHS,
true);
850 return NewICmp(NW, L, R);
876 bool Captured =
false;
881 CmpCaptureTracker(
AllocaInst *Alloca) : Alloca(Alloca) {}
883 void tooManyUses()
override { Captured =
true; }
885 bool captured(
const Use *U)
override {
886 auto *ICmp = dyn_cast<ICmpInst>(U->getUser());
894 ICmps[ICmp] |= 1u << U->getOperandNo();
903 CmpCaptureTracker Tracker(Alloca);
905 if (Tracker.Captured)
908 bool Changed =
false;
909 for (
auto [ICmp,
Operands] : Tracker.ICmps) {
915 auto *Res = ConstantInt::get(ICmp->getType(),
941 assert(!!
C &&
"C should not be zero!");
957 ConstantInt::get(
X->getType(), -
C));
969 ConstantInt::get(
X->getType(),
SMax -
C));
980 ConstantInt::get(
X->getType(),
SMax - (
C - 1)));
989 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
992 if (
I.getPredicate() ==
I.ICMP_NE)
1001 bool IsAShr = isa<AShrOperator>(
I.getOperand(0));
1013 return getICmp(
I.ICMP_UGT,
A,
1014 ConstantInt::get(
A->getType(), AP2.
logBase2()));
1026 if (IsAShr && AP1 == AP2.
ashr(Shift)) {
1030 return getICmp(
I.ICMP_UGE,
A, ConstantInt::get(
A->getType(), Shift));
1031 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1032 }
else if (AP1 == AP2.
lshr(Shift)) {
1033 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1039 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1048 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
1051 if (
I.getPredicate() ==
I.ICMP_NE)
1062 if (!AP1 && AP2TrailingZeros != 0)
1065 ConstantInt::get(
A->getType(), AP2.
getBitWidth() - AP2TrailingZeros));
1073 if (Shift > 0 && AP2.
shl(Shift) == AP1)
1074 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1078 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1099 Instruction *AddWithCst = cast<Instruction>(
I.getOperand(0));
1107 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1131 if (U == AddWithCst)
1149 I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1178 if (!
I.isEquality())
1209 APInt(XBitWidth, XBitWidth - 1))))
1211 }
else if (isa<BinaryOperator>(Val) &&
1236 return new ICmpInst(Pred,
B, Cmp.getOperand(1));
1238 return new ICmpInst(Pred,
A, Cmp.getOperand(1));
1255 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1267 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1273 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1275 auto *BO0 = cast<OverflowingBinaryOperator>(Cmp.getOperand(0));
1276 if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
1284 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1289 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1321 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1334 if (
auto *Phi = dyn_cast<PHINode>(Op0))
1335 if (
all_of(Phi->operands(), [](
Value *V) { return isa<Constant>(V); })) {
1337 for (
Value *V : Phi->incoming_values()) {
1346 for (
auto [V, Pred] :
zip(Ops, Phi->blocks()))
1361 Value *
X = Cmp.getOperand(0), *
Y = Cmp.getOperand(1);
1394 if (Cmp.isEquality() || (IsSignBit &&
hasBranchUse(Cmp)))
1399 if (Cmp.hasOneUse() &&
1413 if (!
match(BI->getCondition(),
1419 if (
auto *V = handleDomCond(DomPred, DomC))
1439 Type *SrcTy =
X->getType();
1445 if (shouldChangeType(Trunc->
getType(), SrcTy)) {
1447 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.sext(SrcBits)));
1449 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.zext(SrcBits)));
1452 if (
C.isOne() &&
C.getBitWidth() > 1) {
1457 ConstantInt::get(V->getType(), 1));
1467 auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
1468 return new ICmpInst(NewPred,
Y, ConstantInt::get(SrcTy, DstBits));
1473 return new ICmpInst(Pred,
Y, ConstantInt::get(SrcTy,
C.logBase2()));
1476 if (Cmp.isEquality() && Trunc->
hasOneUse()) {
1479 if (!SrcTy->
isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1483 Constant *WideC = ConstantInt::get(SrcTy,
C.zext(SrcBits));
1492 if ((Known.
Zero | Known.
One).countl_one() >= SrcBits - DstBits) {
1494 APInt NewRHS =
C.zext(SrcBits);
1496 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy, NewRHS));
1504 const APInt *ShAmtC;
1525 bool YIsSExt =
false;
1528 unsigned NoWrapFlags = cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1529 cast<TruncInst>(Cmp.getOperand(1))->getNoWrapKind();
1530 if (Cmp.isSigned()) {
1541 if (
X->getType() !=
Y->getType() &&
1542 (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1544 if (!isDesirableIntType(
X->getType()->getScalarSizeInBits()) &&
1545 isDesirableIntType(
Y->getType()->getScalarSizeInBits())) {
1547 Pred = Cmp.getSwappedPredicate(Pred);
1552 else if (!Cmp.isSigned() &&
1562 isa<SExtInst>(Cmp.getOperand(0)) || isa<SExtInst>(Cmp.getOperand(1));
1566 Type *TruncTy = Cmp.getOperand(0)->getType();
1571 if (isDesirableIntType(TruncBits) &&
1572 !isDesirableIntType(
X->getType()->getScalarSizeInBits()))
1595 bool TrueIfSigned =
false;
1612 if (
Xor->hasOneUse()) {
1614 if (!Cmp.isEquality() && XorC->
isSignMask()) {
1615 Pred = Cmp.getFlippedSignednessPredicate();
1616 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1621 Pred = Cmp.getFlippedSignednessPredicate();
1622 Pred = Cmp.getSwappedPredicate(Pred);
1623 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1630 if (*XorC == ~
C && (
C + 1).isPowerOf2())
1633 if (*XorC ==
C && (
C + 1).isPowerOf2())
1638 if (*XorC == -
C &&
C.isPowerOf2())
1640 ConstantInt::get(
X->getType(), ~
C));
1642 if (*XorC ==
C && (-
C).isPowerOf2())
1644 ConstantInt::get(
X->getType(), ~
C));
1666 const APInt *ShiftC;
1671 Type *XType =
X->getType();
1677 return new ICmpInst(Pred,
Add, ConstantInt::get(XType, Bound));
1686 if (!Shift || !Shift->
isShift())
1694 unsigned ShiftOpcode = Shift->
getOpcode();
1695 bool IsShl = ShiftOpcode == Instruction::Shl;
1698 APInt NewAndCst, NewCmpCst;
1699 bool AnyCmpCstBitsShiftedOut;
1700 if (ShiftOpcode == Instruction::Shl) {
1708 NewCmpCst = C1.
lshr(*C3);
1709 NewAndCst = C2.
lshr(*C3);
1710 AnyCmpCstBitsShiftedOut = NewCmpCst.
shl(*C3) != C1;
1711 }
else if (ShiftOpcode == Instruction::LShr) {
1716 NewCmpCst = C1.
shl(*C3);
1717 NewAndCst = C2.
shl(*C3);
1718 AnyCmpCstBitsShiftedOut = NewCmpCst.
lshr(*C3) != C1;
1724 assert(ShiftOpcode == Instruction::AShr &&
"Unknown shift opcode");
1725 NewCmpCst = C1.
shl(*C3);
1726 NewAndCst = C2.
shl(*C3);
1727 AnyCmpCstBitsShiftedOut = NewCmpCst.
ashr(*C3) != C1;
1728 if (NewAndCst.
ashr(*C3) != C2)
1732 if (AnyCmpCstBitsShiftedOut) {
1742 Shift->
getOperand(0), ConstantInt::get(
And->getType(), NewAndCst));
1743 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1744 ConstantInt::get(
And->getType(), NewCmpCst));
1761 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1776 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.
isZero() &&
1778 return new TruncInst(
And->getOperand(0), Cmp.getType());
1789 ConstantInt::get(
X->getType(), ~*C2));
1794 ConstantInt::get(
X->getType(), -*C2));
1797 if (!
And->hasOneUse())
1800 if (Cmp.isEquality() && C1.
isZero()) {
1818 Constant *NegBOC = ConstantInt::get(
And->getType(), -NewC2);
1820 return new ICmpInst(NewPred,
X, NegBOC);
1838 if (!Cmp.getType()->isVectorTy()) {
1839 Type *WideType = W->getType();
1841 Constant *ZextC1 = ConstantInt::get(WideType, C1.
zext(WideScalarBits));
1842 Constant *ZextC2 = ConstantInt::get(WideType, C2->
zext(WideScalarBits));
1844 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1855 if (!Cmp.isSigned() && C1.
isZero() &&
And->getOperand(0)->hasOneUse() &&
1857 Constant *One = cast<Constant>(
And->getOperand(1));
1862 unsigned UsesRemoved = 0;
1863 if (
And->hasOneUse())
1865 if (
Or->hasOneUse())
1872 if (UsesRemoved >= RequireUsesRemoved) {
1876 One,
Or->getName());
1878 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1892 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1893 Attribute::NoImplicitFloat) &&
1896 Type *FPType = V->getType()->getScalarType();
1897 if (FPType->isIEEELikeFPTy() && (C1.
isZero() || C1 == *C2)) {
1898 APInt ExponentMask =
1900 if (*C2 == ExponentMask) {
1901 unsigned Mask = C1.
isZero()
1935 Constant *MinSignedC = ConstantInt::get(
1939 return new ICmpInst(NewPred,
X, MinSignedC);
1948 if (
auto *C2 = dyn_cast<ConstantInt>(
Y))
1949 if (
auto *LI = dyn_cast<LoadInst>(
X))
1950 if (
auto *
GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1951 if (
auto *GV = dyn_cast<GlobalVariable>(
GEP->getOperand(0)))
1956 if (!Cmp.isEquality())
1962 if (Cmp.getOperand(1) ==
Y &&
C.isNegatedPowerOf2()) {
1965 return new ICmpInst(NewPred,
X,
SubOne(cast<Constant>(Cmp.getOperand(1))));
1978 assert(Cmp.isEquality() &&
"Not expecting non-equality predicates");
1980 const APInt *TC, *FC;
1997 X->getType()->isIntOrIntVectorTy(1) && (
C.isZero() ||
C.isOne())) {
2003 return BinaryOperator::CreateAnd(TruncY,
X);
2021 const APInt *Addend, *Msk;
2025 APInt NewComperand = (
C - *Addend) & *Msk;
2028 ConstantInt::get(MaskA->
getType(), NewComperand));
2050 while (!WorkList.
empty()) {
2051 auto MatchOrOperatorArgument = [&](
Value *OrOperatorArgument) {
2054 if (
match(OrOperatorArgument,
2060 if (
match(OrOperatorArgument,
2070 Value *OrOperatorLhs, *OrOperatorRhs;
2072 if (!
match(CurrentValue,
2077 MatchOrOperatorArgument(OrOperatorRhs);
2078 MatchOrOperatorArgument(OrOperatorLhs);
2084 CmpValues.
rbegin()->second);
2086 for (
auto It = CmpValues.
rbegin() + 1; It != CmpValues.
rend(); ++It) {
2088 LhsCmp = Builder.
CreateBinOp(BOpc, LhsCmp, RhsCmp);
2104 ConstantInt::get(V->getType(), 1));
2107 Value *OrOp0 =
Or->getOperand(0), *OrOp1 =
Or->getOperand(1);
2112 cast<PossiblyDisjointInst>(
Or)->isDisjoint()) {
2115 return new ICmpInst(Pred, OrOp0, NewC);
2119 if (
match(OrOp1,
m_APInt(MaskC)) && Cmp.isEquality()) {
2120 if (*MaskC ==
C && (
C + 1).isPowerOf2()) {
2125 return new ICmpInst(Pred, OrOp0, OrOp1);
2132 if (
Or->hasOneUse()) {
2134 Constant *NewC = ConstantInt::get(
Or->getType(),
C ^ (*MaskC));
2146 Constant *NewC = ConstantInt::get(
X->getType(), TrueIfSigned ? 1 : 0);
2174 if (!Cmp.isEquality() || !
C.isZero() || !
Or->hasOneUse())
2206 if (Cmp.isEquality() &&
C.isZero() &&
X ==
Mul->getOperand(1) &&
2207 (
Mul->hasNoUnsignedWrap() ||
Mul->hasNoSignedWrap()))
2229 if (Cmp.isEquality()) {
2231 if (
Mul->hasNoSignedWrap() &&
C.srem(*MulC).isZero()) {
2232 Constant *NewC = ConstantInt::get(MulTy,
C.sdiv(*MulC));
2240 if (
C.urem(*MulC).isZero()) {
2243 if ((*MulC & 1).isOne() ||
Mul->hasNoUnsignedWrap()) {
2244 Constant *NewC = ConstantInt::get(MulTy,
C.udiv(*MulC));
2257 if (
C.isMinSignedValue() && MulC->
isAllOnes())
2263 NewC = ConstantInt::get(
2267 "Unexpected predicate");
2268 NewC = ConstantInt::get(
2273 NewC = ConstantInt::get(
2277 "Unexpected predicate");
2278 NewC = ConstantInt::get(
2283 return NewC ?
new ICmpInst(Pred,
X, NewC) :
nullptr;
2295 unsigned TypeBits =
C.getBitWidth();
2297 if (Cmp.isUnsigned()) {
2317 return new ICmpInst(Pred,
Y, ConstantInt::get(ShiftType, CLog2));
2318 }
else if (Cmp.isSigned() && C2->
isOne()) {
2319 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2340 const APInt *ShiftVal;
2370 const APInt *ShiftAmt;
2376 unsigned TypeBits =
C.getBitWidth();
2377 if (ShiftAmt->
uge(TypeBits))
2389 APInt ShiftedC =
C.ashr(*ShiftAmt);
2390 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2393 C.ashr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2394 APInt ShiftedC =
C.ashr(*ShiftAmt);
2395 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2402 assert(!
C.isMinSignedValue() &&
"Unexpected icmp slt");
2403 APInt ShiftedC = (
C - 1).ashr(*ShiftAmt) + 1;
2404 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2414 APInt ShiftedC =
C.lshr(*ShiftAmt);
2415 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2418 C.lshr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2419 APInt ShiftedC =
C.lshr(*ShiftAmt);
2420 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2427 assert(
C.ugt(0) &&
"ult 0 should have been eliminated");
2428 APInt ShiftedC = (
C - 1).lshr(*ShiftAmt) + 1;
2429 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2433 if (Cmp.isEquality() && Shl->
hasOneUse()) {
2439 Constant *LShrC = ConstantInt::get(ShType,
C.lshr(*ShiftAmt));
2444 bool TrueIfSigned =
false;
2456 if (Cmp.isUnsigned() && Shl->
hasOneUse()) {
2458 if ((
C + 1).isPowerOf2() &&
2466 if (
C.isPowerOf2() &&
2496 Pred, ConstantInt::get(ShType->
getContext(),
C))) {
2497 CmpPred = FlippedStrictness->first;
2498 RHSC = cast<ConstantInt>(FlippedStrictness->second)->getValue();
2505 ConstantInt::get(TruncTy, RHSC.
ashr(*ShiftAmt).
trunc(TypeBits - Amt));
2524 if (Cmp.isEquality() && Shr->
isExact() &&
C.isZero())
2525 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
2527 bool IsAShr = Shr->
getOpcode() == Instruction::AShr;
2528 const APInt *ShiftValC;
2530 if (Cmp.isEquality())
2548 assert(ShiftValC->
uge(
C) &&
"Expected simplify of compare");
2549 assert((IsUGT || !
C.isZero()) &&
"Expected X u< 0 to simplify");
2551 unsigned CmpLZ = IsUGT ?
C.countl_zero() : (
C - 1).
countl_zero();
2559 const APInt *ShiftAmtC;
2565 unsigned TypeBits =
C.getBitWidth();
2567 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2570 bool IsExact = Shr->
isExact();
2578 (
C - 1).isPowerOf2() &&
C.countLeadingZeros() > ShAmtVal) {
2584 APInt ShiftedC = (
C - 1).shl(ShAmtVal) + 1;
2585 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2591 APInt ShiftedC =
C.shl(ShAmtVal);
2592 if (ShiftedC.
ashr(ShAmtVal) ==
C)
2593 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2597 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2598 if (!
C.isMaxSignedValue() && !(
C + 1).shl(ShAmtVal).isMinSignedValue() &&
2599 (ShiftedC + 1).ashr(ShAmtVal) == (
C + 1))
2600 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2606 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2607 if ((ShiftedC + 1).ashr(ShAmtVal) == (
C + 1) ||
2608 (
C + 1).shl(ShAmtVal).isMinSignedValue())
2609 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2616 if (
C.getBitWidth() > 2 &&
C.getNumSignBits() <= ShAmtVal) {
2626 }
else if (!IsAShr) {
2630 APInt ShiftedC =
C.shl(ShAmtVal);
2631 if (ShiftedC.
lshr(ShAmtVal) ==
C)
2632 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2636 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2637 if ((ShiftedC + 1).lshr(ShAmtVal) == (
C + 1))
2638 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2642 if (!Cmp.isEquality())
2650 assert(((IsAShr &&
C.shl(ShAmtVal).ashr(ShAmtVal) ==
C) ||
2651 (!IsAShr &&
C.shl(ShAmtVal).lshr(ShAmtVal) ==
C)) &&
2652 "Expected icmp+shr simplify did not occur.");
2657 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy,
C << ShAmtVal));
2663 ConstantInt::get(ShrTy, (
C + 1).shl(ShAmtVal)));
2666 ConstantInt::get(ShrTy, (
C + 1).shl(ShAmtVal) - 1));
2673 Constant *Mask = ConstantInt::get(ShrTy, Val);
2675 return new ICmpInst(Pred,
And, ConstantInt::get(ShrTy,
C << ShAmtVal));
2692 const APInt *DivisorC;
2699 "ult X, 0 should have been simplified already.");
2705 "srem X, 0 should have been simplified already.");
2706 if (!NormalizedC.
uge(DivisorC->
abs() - 1))
2729 const APInt *DivisorC;
2738 !
C.isStrictlyPositive()))
2744 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2748 return new ICmpInst(Pred,
And, ConstantInt::get(Ty,
C));
2775 assert(*C2 != 0 &&
"udiv 0, X should have been simplified already.");
2780 "icmp ugt X, UINT_MAX should have been simplified already.");
2782 ConstantInt::get(Ty, C2->
udiv(
C + 1)));
2787 assert(
C != 0 &&
"icmp ult X, 0 should have been simplified already.");
2789 ConstantInt::get(Ty, C2->
udiv(
C)));
2803 bool DivIsSigned = Div->
getOpcode() == Instruction::SDiv;
2813 if (Cmp.isEquality() && Div->
hasOneUse() &&
C.isSignBitSet() &&
2814 (!DivIsSigned ||
C.isMinSignedValue())) {
2839 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2858 bool ProdOV = (DivIsSigned ? Prod.
sdiv(*C2) : Prod.
udiv(*C2)) !=
C;
2871 int LoOverflow = 0, HiOverflow = 0;
2872 APInt LoBound, HiBound;
2877 HiOverflow = LoOverflow = ProdOV;
2886 LoBound = -(RangeSize - 1);
2887 HiBound = RangeSize;
2888 }
else if (
C.isStrictlyPositive()) {
2890 HiOverflow = LoOverflow = ProdOV;
2896 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2898 APInt DivNeg = -RangeSize;
2899 LoOverflow =
addWithOverflow(LoBound, HiBound, DivNeg,
true) ? -1 : 0;
2907 LoBound = RangeSize + 1;
2908 HiBound = -RangeSize;
2909 if (HiBound == *C2) {
2913 }
else if (
C.isStrictlyPositive()) {
2916 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2922 LoOverflow = HiOverflow = ProdOV;
2935 if (LoOverflow && HiOverflow)
2939 X, ConstantInt::get(Ty, LoBound));
2942 X, ConstantInt::get(Ty, HiBound));
2946 if (LoOverflow && HiOverflow)
2950 X, ConstantInt::get(Ty, LoBound));
2953 X, ConstantInt::get(Ty, HiBound));
2958 if (LoOverflow == +1)
2960 if (LoOverflow == -1)
2962 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, LoBound));
2965 if (HiOverflow == +1)
2967 if (HiOverflow == -1)
3000 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
3002 return new ICmpInst(SwappedPred,
Y, ConstantInt::get(Ty, SubResult));
3010 if (Cmp.isEquality() &&
C.isZero() &&
3046 (*C2 & (
C - 1)) == (
C - 1))
3059 return new ICmpInst(SwappedPred,
Add, ConstantInt::get(Ty, ~
C));
3065 auto FoldConstant = [&](
bool Val) {
3069 cast<VectorType>(Op0->
getType())->getElementCount(), Res);
3073 switch (Table.to_ulong()) {
3075 return FoldConstant(
false);
3105 return FoldConstant(
true);
3128 unsigned BW =
C.getBitWidth();
3129 std::bitset<4> Table;
3130 auto ComputeTable = [&](
bool Op0Val,
bool Op1Val) {
3133 Res +=
APInt(BW, isa<ZExtInst>(Ext0) ? 1 : -1,
true);
3135 Res +=
APInt(BW, isa<ZExtInst>(Ext1) ? 1 : -1,
true);
3139 Table[0] = ComputeTable(
false,
false);
3140 Table[1] = ComputeTable(
false,
true);
3141 Table[2] = ComputeTable(
true,
false);
3142 Table[3] = ComputeTable(
true,
true);
3157 if ((
Add->hasNoSignedWrap() &&
3159 (
Add->hasNoUnsignedWrap() &&
3163 Cmp.isSigned() ?
C.ssub_ov(*C2, Overflow) :
C.usub_ov(*C2, Overflow);
3169 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, NewC));
3173 C.isNonNegative() && (
C - *C2).isNonNegative() &&
3176 ConstantInt::get(Ty,
C - *C2));
3181 if (Cmp.isSigned()) {
3182 if (
Lower.isSignMask())
3184 if (
Upper.isSignMask())
3187 if (
Lower.isMinValue())
3189 if (
Upper.isMinValue())
3222 if (!
Add->hasOneUse())
3237 ConstantInt::get(Ty,
C * 2));
3252 ConstantInt::get(Ty, ~
C));
3257 Type *NewCmpTy = V->getType();
3259 if (shouldChangeType(Ty, NewCmpTy)) {
3260 if (CR.getActiveBits() <= NewCmpBW) {
3272 ConstantInt::get(NewCmpTy, EquivInt));
3295 Value *EqualVal = SI->getTrueValue();
3296 Value *UnequalVal = SI->getFalseValue();
3319 auto FlippedStrictness =
3321 if (!FlippedStrictness)
3324 "basic correctness failure");
3325 RHS2 = FlippedStrictness->second;
3337 assert(
C &&
"Cmp RHS should be a constant int!");
3343 Value *OrigLHS, *OrigRHS;
3344 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3345 if (Cmp.hasOneUse() &&
3348 assert(C1LessThan && C2Equal && C3GreaterThan);
3351 C1LessThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3353 Cmp.getPredicate());
3355 C3GreaterThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3366 if (TrueWhenLessThan)
3372 if (TrueWhenGreaterThan)
3382 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
3387 Value *Op1 = Cmp.getOperand(1);
3388 Value *BCSrcOp = Bitcast->getOperand(0);
3389 Type *SrcType = Bitcast->getSrcTy();
3390 Type *DstType = Bitcast->getType();
3410 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(), 1));
3437 Type *XType =
X->getType();
3442 if (
auto *XVTy = dyn_cast<VectorType>(XType))
3456 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3457 Attribute::NoImplicitFloat) &&
3458 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3482 if (Cmp.isEquality() &&
C->isAllOnes() && Bitcast->hasOneUse()) {
3483 if (
Value *NotBCSrcOp =
3494 if (Cmp.isEquality() &&
C->isZero() && Bitcast->hasOneUse() &&
3496 if (
auto *VecTy = dyn_cast<FixedVectorType>(
X->getType())) {
3515 auto *VecTy = cast<VectorType>(SrcType);
3516 auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3517 if (
C->isSplat(EltTy->getBitWidth())) {
3525 Value *NewC = ConstantInt::get(EltTy,
C->trunc(EltTy->getBitWidth()));
3526 return new ICmpInst(Pred, Extract, NewC);
3539 if (
auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3543 if (
auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3547 if (
auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3551 if (
auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3555 if (
auto *
II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3562 Value *Cmp0 = Cmp.getOperand(0);
3564 if (
C->isZero() && Cmp.isEquality() && Cmp0->
hasOneUse() &&
3566 m_ExtractValue<0>(m_Intrinsic<Intrinsic::ssub_with_overflow>(
3569 m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
3571 return new ICmpInst(Cmp.getPredicate(),
X,
Y);
3586 if (!Cmp.isEquality())
3591 Constant *
RHS = cast<Constant>(Cmp.getOperand(1));
3595 case Instruction::SRem:
3606 case Instruction::Add: {
3610 if (
Constant *C2 = dyn_cast<Constant>(BOp1)) {
3613 }
else if (
C.isZero()) {
3616 if (
Value *NegVal = dyn_castNegVal(BOp1))
3617 return new ICmpInst(Pred, BOp0, NegVal);
3618 if (
Value *NegVal = dyn_castNegVal(BOp0))
3619 return new ICmpInst(Pred, NegVal, BOp1);
3628 return new ICmpInst(Pred, BOp0, Neg);
3633 case Instruction::Xor:
3634 if (
Constant *BOC = dyn_cast<Constant>(BOp1)) {
3638 }
else if (
C.isZero()) {
3640 return new ICmpInst(Pred, BOp0, BOp1);
3643 case Instruction::Or: {
3664 Cond->getType() == Cmp.getType()) {
3702 case Instruction::UDiv:
3703 case Instruction::SDiv:
3713 return new ICmpInst(Pred, BOp0, BOp1);
3716 Instruction::Mul, BO->
getOpcode() == Instruction::SDiv, BOp1,
3717 Cmp.getOperand(1), BO);
3721 return new ICmpInst(Pred, YC, BOp0);
3725 if (BO->
getOpcode() == Instruction::UDiv &&
C.isZero()) {
3728 return new ICmpInst(NewPred, BOp1, BOp0);
3742 "Non-ctpop intrin in ctpop fold");
3778 Type *Ty =
II->getType();
3782 switch (
II->getIntrinsicID()) {
3783 case Intrinsic::abs:
3786 if (
C.isZero() ||
C.isMinSignedValue())
3787 return new ICmpInst(Pred,
II->getArgOperand(0), ConstantInt::get(Ty,
C));
3790 case Intrinsic::bswap:
3792 return new ICmpInst(Pred,
II->getArgOperand(0),
3793 ConstantInt::get(Ty,
C.byteSwap()));
3795 case Intrinsic::bitreverse:
3797 return new ICmpInst(Pred,
II->getArgOperand(0),
3798 ConstantInt::get(Ty,
C.reverseBits()));
3800 case Intrinsic::ctlz:
3801 case Intrinsic::cttz: {
3804 return new ICmpInst(Pred,
II->getArgOperand(0),
3810 unsigned Num =
C.getLimitedValue(
BitWidth);
3812 bool IsTrailing =
II->getIntrinsicID() == Intrinsic::cttz;
3815 APInt Mask2 = IsTrailing
3819 ConstantInt::get(Ty, Mask2));
3824 case Intrinsic::ctpop: {
3827 bool IsZero =
C.isZero();
3829 return new ICmpInst(Pred,
II->getArgOperand(0),
3836 case Intrinsic::fshl:
3837 case Intrinsic::fshr:
3838 if (
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3839 const APInt *RotAmtC;
3843 return new ICmpInst(Pred,
II->getArgOperand(0),
3844 II->getIntrinsicID() == Intrinsic::fshl
3845 ? ConstantInt::get(Ty,
C.rotr(*RotAmtC))
3846 : ConstantInt::get(Ty,
C.rotl(*RotAmtC)));
3850 case Intrinsic::umax:
3851 case Intrinsic::uadd_sat: {
3854 if (
C.isZero() &&
II->hasOneUse()) {
3861 case Intrinsic::ssub_sat:
3864 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
3866 case Intrinsic::usub_sat: {
3871 return new ICmpInst(NewPred,
II->getArgOperand(0),
II->getArgOperand(1));
3886 assert(Cmp.isEquality());
3889 Value *Op0 = Cmp.getOperand(0);
3890 Value *Op1 = Cmp.getOperand(1);
3891 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3892 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3893 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3896 switch (IIOp0->getIntrinsicID()) {
3897 case Intrinsic::bswap:
3898 case Intrinsic::bitreverse:
3901 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3902 case Intrinsic::fshl:
3903 case Intrinsic::fshr: {
3906 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3908 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3910 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3911 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3917 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3922 Builder.
CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3924 Op0->
getType(), IIOp0->getIntrinsicID(),
3925 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3926 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3943 if (
auto *
II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3944 switch (
II->getIntrinsicID()) {
3947 case Intrinsic::fshl:
3948 case Intrinsic::fshr:
3949 if (Cmp.isEquality() &&
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3951 if (
C.isZero() ||
C.isAllOnes())
3952 return new ICmpInst(Pred,
II->getArgOperand(0), Cmp.getOperand(1));
3966 case Instruction::Xor:
3970 case Instruction::And:
3974 case Instruction::Or:
3978 case Instruction::Mul:
3982 case Instruction::Shl:
3986 case Instruction::LShr:
3987 case Instruction::AShr:
3991 case Instruction::SRem:
3995 case Instruction::UDiv:
3999 case Instruction::SDiv:
4003 case Instruction::Sub:
4007 case Instruction::Add:
4025 if (!
II->hasOneUse())
4041 Value *Op0 =
II->getOperand(0);
4042 Value *Op1 =
II->getOperand(1);
4051 switch (
II->getIntrinsicID()) {
4054 "This function only works with usub_sat and uadd_sat for now!");
4055 case Intrinsic::uadd_sat:
4058 case Intrinsic::usub_sat:
4068 II->getBinaryOp(), *COp1,
II->getNoWrapKind());
4075 if (
II->getBinaryOp() == Instruction::Add)
4081 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4083 std::optional<ConstantRange> Combination;
4084 if (CombiningOp == Instruction::BinaryOps::Or)
4096 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4101 ConstantInt::get(Op1->
getType(), EquivInt));
4108 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4113 NewPredicate = Pred;
4117 else if (
C.isAllOnes())
4125 else if (
C.isZero())
4142 if (!
C.isZero() && !
C.isAllOnes())
4153 if (
I->getIntrinsicID() == Intrinsic::scmp)
4167 switch (
II->getIntrinsicID()) {
4170 case Intrinsic::uadd_sat:
4171 case Intrinsic::usub_sat:
4176 case Intrinsic::ctpop: {
4181 case Intrinsic::scmp:
4182 case Intrinsic::ucmp:
4188 if (Cmp.isEquality())
4191 Type *Ty =
II->getType();
4193 switch (
II->getIntrinsicID()) {
4194 case Intrinsic::ctpop: {
4206 case Intrinsic::ctlz: {
4209 unsigned Num =
C.getLimitedValue();
4212 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4217 unsigned Num =
C.getLimitedValue();
4220 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4224 case Intrinsic::cttz: {
4226 if (!
II->hasOneUse())
4246 case Intrinsic::ssub_sat:
4250 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
4254 II->getArgOperand(1));
4258 II->getArgOperand(1));
4270 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
4271 Constant *RHSC = dyn_cast<Constant>(Op1);
4277 case Instruction::PHI:
4281 case Instruction::IntToPtr:
4290 case Instruction::Load:
4293 dyn_cast<GetElementPtrInst>(LHSI->
getOperand(0)))
4308 auto SimplifyOp = [&](
Value *
Op,
bool SelectCondIsTrue) ->
Value * {
4312 SI->getCondition(), Pred,
Op,
RHS,
DL, SelectCondIsTrue))
4313 return ConstantInt::get(
I.getType(), *Impl);
4318 Value *Op1 = SimplifyOp(SI->getOperand(1),
true);
4320 CI = dyn_cast<ConstantInt>(Op1);
4322 Value *Op2 = SimplifyOp(SI->getOperand(2),
false);
4324 CI = dyn_cast<ConstantInt>(Op2);
4326 auto Simplifies = [&](
Value *
Op,
unsigned Idx) {
4330 (isa<CmpIntrinsic>(SI->getOperand(
Idx)) &&
4341 bool Transform =
false;
4344 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4346 if (SI->hasOneUse())
4349 else if (CI && !CI->
isZero())
4368 unsigned Depth = 0) {
4371 if (V->getType()->getScalarSizeInBits() == 1)
4379 switch (
I->getOpcode()) {
4380 case Instruction::ZExt:
4383 case Instruction::SExt:
4387 case Instruction::And:
4388 case Instruction::Or:
4395 case Instruction::Xor:
4405 case Instruction::Select:
4409 case Instruction::Shl:
4412 case Instruction::LShr:
4415 case Instruction::AShr:
4419 case Instruction::Add:
4425 case Instruction::Sub:
4431 case Instruction::Call: {
4432 if (
auto *
II = dyn_cast<IntrinsicInst>(
I)) {
4433 switch (
II->getIntrinsicID()) {
4436 case Intrinsic::umax:
4437 case Intrinsic::smax:
4438 case Intrinsic::umin:
4439 case Intrinsic::smin:
4444 case Intrinsic::bitreverse:
4534 auto IsLowBitMask = [&]() {
4552 auto Check = [&]() {
4570 auto Check = [&]() {
4589 if (!IsLowBitMask())
4608 const APInt *C0, *C1;
4625 const APInt &MaskedBits = *C0;
4626 assert(MaskedBits != 0 &&
"shift by zero should be folded away already.");
4647 auto *XType =
X->getType();
4648 const unsigned XBitWidth = XType->getScalarSizeInBits();
4650 assert(
BitWidth.ugt(MaskedBits) &&
"shifts should leave some bits untouched");
4681 !
I.getOperand(0)->hasOneUse())
4706 assert(NarrowestTy ==
I.getOperand(0)->getType() &&
4707 "We did not look past any shifts while matching XShift though.");
4708 bool HadTrunc = WidestTy !=
I.getOperand(0)->getType();
4715 auto XShiftOpcode = XShift->
getOpcode();
4716 if (XShiftOpcode == YShift->
getOpcode())
4719 Value *
X, *XShAmt, *
Y, *YShAmt;
4726 if (!isa<Constant>(
X) && !isa<Constant>(
Y)) {
4728 if (!
match(
I.getOperand(0),
4754 unsigned MaximalPossibleTotalShiftAmount =
4757 APInt MaximalRepresentableShiftAmount =
4759 if (MaximalRepresentableShiftAmount.
ult(MaximalPossibleTotalShiftAmount))
4763 auto *NewShAmt = dyn_cast_or_null<Constant>(
4768 if (NewShAmt->getType() != WidestTy) {
4778 if (!
match(NewShAmt,
4780 APInt(WidestBitWidth, WidestBitWidth))))
4785 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4791 ? NewShAmt->getSplatValue()
4794 if (NewShAmtSplat &&
4800 if (
auto *
C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
4804 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4805 if (MaxActiveBits <= 1)
4811 if (
auto *
C = dyn_cast<Constant>(WidestShift->
getOperand(0))) {
4815 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4816 if (MaxActiveBits <= 1)
4819 if (NewShAmtSplat) {
4822 if (AdjNewShAmt.
ule(MinLeadZero))
4836 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4858 if (!
I.isEquality() &&
4868 NeedNegation =
false;
4871 NeedNegation =
true;
4877 if (
I.isEquality() &&
4892 bool MulHadOtherUses =
Mul && !
Mul->hasOneUse();
4893 if (MulHadOtherUses)
4897 Div->
getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4898 : Intrinsic::smul_with_overflow,
4899 X->getType(), {X, Y},
nullptr,
"mul");
4904 if (MulHadOtherUses)
4913 if (MulHadOtherUses)
4939 Type *Ty =
X->getType();
4953 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5015 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5050 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5066 return new ICmpInst(PredOut, Op0, Op1);
5086 return new ICmpInst(NewPred, Op0, Const);
5099 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5167 return new ICmpInst(NewPred, Op1, Zero);
5176 return new ICmpInst(NewPred, Op0, Zero);
5180 bool NoOp0WrapProblem =
false, NoOp1WrapProblem =
false;
5181 bool Op0HasNUW =
false, Op1HasNUW =
false;
5182 bool Op0HasNSW =
false, Op1HasNSW =
false;
5186 bool &HasNSW,
bool &HasNUW) ->
bool {
5187 if (isa<OverflowingBinaryOperator>(BO)) {
5193 }
else if (BO.
getOpcode() == Instruction::Or) {
5201 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
5205 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5209 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5214 if ((
A == Op1 ||
B == Op1) && NoOp0WrapProblem)
5220 if ((
C == Op0 ||
D == Op0) && NoOp1WrapProblem)
5225 if (
A &&
C && (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D) && NoOp0WrapProblem &&
5233 }
else if (
A ==
D) {
5237 }
else if (
B ==
C) {
5318 if (
A &&
C && NoOp0WrapProblem && NoOp1WrapProblem &&
5320 const APInt *AP1, *AP2;
5328 if (AP1Abs.
uge(AP2Abs)) {
5329 APInt Diff = *AP1 - *AP2;
5332 A, C3,
"", Op0HasNUW && Diff.
ule(*AP1), Op0HasNSW);
5335 APInt Diff = *AP2 - *AP1;
5338 C, C3,
"", Op1HasNUW && Diff.
ule(*AP2), Op1HasNSW);
5357 if (BO0 && BO0->
getOpcode() == Instruction::Sub) {
5361 if (BO1 && BO1->
getOpcode() == Instruction::Sub) {
5367 if (
A == Op1 && NoOp0WrapProblem)
5370 if (
C == Op0 && NoOp1WrapProblem)
5390 if (
B &&
D &&
B ==
D && NoOp0WrapProblem && NoOp1WrapProblem)
5394 if (
A &&
C &&
A ==
C && NoOp0WrapProblem && NoOp1WrapProblem)
5401 if (
Constant *RHSC = dyn_cast<Constant>(Op1))
5402 if (RHSC->isNotMinSignedValue())
5403 return new ICmpInst(
I.getSwappedPredicate(),
X,
5421 if (Op0HasNSW && Op1HasNSW) {
5434 if (GreaterThan &&
match(GreaterThan,
m_One()))
5441 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5453 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5460 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5471 else if (BO1 && BO1->
getOpcode() == Instruction::SRem &&
5501 case Instruction::Add:
5502 case Instruction::Sub:
5503 case Instruction::Xor: {
5510 if (
C->isSignMask()) {
5516 if (BO0->
getOpcode() == Instruction::Xor &&
C->isMaxSignedValue()) {
5518 NewPred =
I.getSwappedPredicate(NewPred);
5524 case Instruction::Mul: {
5525 if (!
I.isEquality())
5533 if (
unsigned TZs =
C->countr_zero()) {
5539 return new ICmpInst(Pred, And1, And2);
5544 case Instruction::UDiv:
5545 case Instruction::LShr:
5550 case Instruction::SDiv:
5556 case Instruction::AShr:
5561 case Instruction::Shl: {
5562 bool NUW = Op0HasNUW && Op1HasNUW;
5563 bool NSW = Op0HasNSW && Op1HasNSW;
5566 if (!NSW &&
I.isSigned())
5630 auto IsCondKnownTrue = [](
Value *Val) -> std::optional<bool> {
5632 return std::nullopt;
5637 return std::nullopt;
5641 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5643 if (!CmpXZ.has_value()) {
5649 if (CmpYZ.has_value())
5673 if (!MinMaxCmpXZ.has_value()) {
5681 if (!MinMaxCmpXZ.has_value())
5697 return FoldIntoCmpYZ();
5724 return FoldIntoCmpYZ();
5733 return FoldIntoCmpYZ();
5755 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5759 if (
I.isEquality()) {
5794 Type *Ty =
A->getType();
5797 ConstantInt::get(Ty, 2))
5799 ConstantInt::get(Ty, 1));
5806 if (!
I.isEquality())
5809 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5813 if (
A == Op1 ||
B == Op1) {
5814 Value *OtherVal =
A == Op1 ?
B :
A;
5842 Value *OtherVal =
A == Op0 ?
B :
A;
5849 Value *
X =
nullptr, *
Y =
nullptr, *Z =
nullptr;
5855 }
else if (
A ==
D) {
5859 }
else if (
B ==
C) {
5863 }
else if (
B ==
D) {
5873 const APInt *C0, *C1;
5875 (*C0 ^ *C1).isNegatedPowerOf2();
5881 int(Op0->
hasOneUse()) + int(Op1->hasOneUse()) +
5883 if (XorIsNegP2 || UseCnt >= 2) {
5906 (Op0->
hasOneUse() || Op1->hasOneUse())) {
5911 MaskC->
countr_one() ==
A->getType()->getScalarSizeInBits())
5917 const APInt *AP1, *AP2;
5926 if (ShAmt < TypeBits && ShAmt != 0) {
5931 return new ICmpInst(NewPred,
Xor, ConstantInt::get(
A->getType(), CmpVal));
5941 if (ShAmt < TypeBits && ShAmt != 0) {
5959 unsigned ASize = cast<IntegerType>(
A->getType())->getPrimitiveSizeInBits();
5961 if (ShAmt < ASize) {
5984 A->getType()->getScalarSizeInBits() ==
BitWidth * 2 &&
5985 (
I.getOperand(0)->hasOneUse() ||
I.getOperand(1)->hasOneUse())) {
5990 Add, ConstantInt::get(
A->getType(),
C.shl(1)));
6013 m_OneUse(m_Intrinsic<Intrinsic::fshr>(
6032 std::optional<bool> IsZero = std::nullopt;
6070 Constant *
C = ConstantInt::get(Res->X->getType(), Res->C);
6074 unsigned SrcBits =
X->getType()->getScalarSizeInBits();
6075 if (
auto *
II = dyn_cast<IntrinsicInst>(
X)) {
6076 if (
II->getIntrinsicID() == Intrinsic::cttz ||
6077 II->getIntrinsicID() == Intrinsic::ctlz) {
6078 unsigned MaxRet = SrcBits;
6098 assert(isa<CastInst>(ICmp.
getOperand(0)) &&
"Expected cast for operand 0");
6099 auto *CastOp0 = cast<CastInst>(ICmp.
getOperand(0));
6104 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6105 bool IsSignedCmp = ICmp.
isSigned();
6110 bool IsZext0 = isa<ZExtInst>(ICmp.
getOperand(0));
6111 bool IsZext1 = isa<ZExtInst>(ICmp.
getOperand(1));
6113 if (IsZext0 != IsZext1) {
6118 if (ICmp.
isEquality() &&
X->getType()->isIntOrIntVectorTy(1) &&
6119 Y->getType()->isIntOrIntVectorTy(1))
6126 auto *NonNegInst0 = dyn_cast<PossiblyNonNegInst>(ICmp.
getOperand(0));
6127 auto *NonNegInst1 = dyn_cast<PossiblyNonNegInst>(ICmp.
getOperand(1));
6129 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6130 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6132 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6139 Type *XTy =
X->getType(), *YTy =
Y->getType();
6146 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6162 if (IsSignedCmp && IsSignedExt)
6175 Type *SrcTy = CastOp0->getSrcTy();
6183 if (IsSignedExt && IsSignedCmp)
6195 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(
C))
6214 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(0));
6215 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(1));
6216 if (SimplifiedOp0 || SimplifiedOp1)
6218 SimplifiedOp0 ? SimplifiedOp0 : ICmp.
getOperand(0),
6219 SimplifiedOp1 ? SimplifiedOp1 : ICmp.
getOperand(1));
6221 auto *CastOp0 = dyn_cast<CastInst>(ICmp.
getOperand(0));
6227 Value *Op0Src = CastOp0->getOperand(0);
6228 Type *SrcTy = CastOp0->getSrcTy();
6229 Type *DestTy = CastOp0->getDestTy();
6233 auto CompatibleSizes = [&](
Type *PtrTy,
Type *IntTy) {
6234 if (isa<VectorType>(PtrTy)) {
6235 PtrTy = cast<VectorType>(PtrTy)->getElementType();
6236 IntTy = cast<VectorType>(IntTy)->getElementType();
6240 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6241 CompatibleSizes(SrcTy, DestTy)) {
6242 Value *NewOp1 =
nullptr;
6243 if (
auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.
getOperand(1))) {
6244 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6246 NewOp1 = PtrToIntOp1->getOperand(0);
6247 }
else if (
auto *RHSC = dyn_cast<Constant>(ICmp.
getOperand(1))) {
6256 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6257 CompatibleSizes(DestTy, SrcTy)) {
6258 Value *NewOp1 =
nullptr;
6259 if (
auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.
getOperand(1))) {
6260 Value *IntSrc = IntToPtrOp1->getOperand(0);
6262 NewOp1 = IntToPtrOp1->getOperand(0);
6263 }
else if (
auto *RHSC = dyn_cast<Constant>(ICmp.
getOperand(1))) {
6282 case Instruction::Add:
6283 case Instruction::Sub:
6285 case Instruction::Mul:
6298 case Instruction::Add:
6303 case Instruction::Sub:
6308 case Instruction::Mul:
6317 bool IsSigned,
Value *LHS,
6321 if (OrigI.
isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
6331 if (
auto *LHSTy = dyn_cast<VectorType>(
LHS->
getType()))
6346 Result->takeName(&OrigI);
6351 Result->takeName(&OrigI);
6353 if (
auto *Inst = dyn_cast<Instruction>(Result)) {
6355 Inst->setHasNoSignedWrap();
6357 Inst->setHasNoUnsignedWrap();
6380 const APInt *OtherVal,
6384 if (!isa<IntegerType>(MulVal->
getType()))
6387 auto *MulInstr = dyn_cast<Instruction>(MulVal);
6390 assert(MulInstr->getOpcode() == Instruction::Mul);
6392 auto *
LHS = cast<ZExtInst>(MulInstr->getOperand(0)),
6393 *
RHS = cast<ZExtInst>(MulInstr->getOperand(1));
6394 assert(
LHS->getOpcode() == Instruction::ZExt);
6395 assert(
RHS->getOpcode() == Instruction::ZExt);
6399 Type *TyA =
A->getType(), *TyB =
B->getType();
6401 WidthB = TyB->getPrimitiveSizeInBits();
6404 if (WidthB > WidthA) {
6419 if (
TruncInst *TI = dyn_cast<TruncInst>(U)) {
6422 if (TruncWidth > MulWidth)
6426 if (BO->getOpcode() != Instruction::And)
6428 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6429 const APInt &CVal = CI->getValue();
6445 switch (
I.getPredicate()) {
6452 if (MaxVal.
eq(*OtherVal))
6462 if (MaxVal.
eq(*OtherVal))
6476 if (WidthA < MulWidth)
6478 if (WidthB < MulWidth)
6482 {MulA, MulB},
nullptr,
"umul");
6493 if (
TruncInst *TI = dyn_cast<TruncInst>(U)) {
6494 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6499 assert(BO->getOpcode() == Instruction::And);
6501 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
6537 switch (
I.getPredicate()) {
6568 assert(DI && UI &&
"Instruction not defined\n");
6579 auto *Usr = cast<Instruction>(U);
6580 if (Usr != UI && !
DT.
dominates(DB, Usr->getParent()))
6591 auto *BI = dyn_cast_or_null<BranchInst>(BB->
getTerminator());
6592 if (!BI || BI->getNumSuccessors() != 2)
6594 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
6595 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
6642 const unsigned SIOpd) {
6643 assert((SIOpd == 1 || SIOpd == 2) &&
"Invalid select operand!");
6645 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
6659 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
6669 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6698 if (!isa<Constant>(Op0) && Op0Known.
isConstant())
6701 if (!isa<Constant>(Op1) && Op1Known.
isConstant())
6730 if (!Cmp.hasOneUse())
6739 if (!isMinMaxCmp(
I)) {
6744 if (Op1Min == Op0Max)
6749 if (*CmpC == Op0Min + 1)
6751 ConstantInt::get(Op1->getType(), *CmpC - 1));
6761 if (Op1Max == Op0Min)
6766 if (*CmpC == Op0Max - 1)
6768 ConstantInt::get(Op1->getType(), *CmpC + 1));
6778 if (Op1Min == Op0Max)
6782 if (*CmpC == Op0Min + 1)
6784 ConstantInt::get(Op1->getType(), *CmpC - 1));
6789 if (Op1Max == Op0Min)
6793 if (*CmpC == Op0Max - 1)
6795 ConstantInt::get(Op1->getType(), *CmpC + 1));
6812 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
6818 *LHSC != Op0KnownZeroInverted)
6824 Type *XTy =
X->getType();
6826 APInt C2 = Op0KnownZeroInverted;
6827 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
6833 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
6843 (Op0Known & Op1Known) == Op0Known)
6849 if (Op1Min == Op0Max)
6853 if (Op1Max == Op0Min)
6857 if (Op1Min == Op0Max)
6861 if (Op1Max == Op0Min)
6869 if ((
I.isSigned() || (
I.isUnsigned() && !
I.hasSameSign())) &&
6872 I.setPredicate(
I.getUnsignedPredicate());
6907 bool IsSExt = ExtI->
getOpcode() == Instruction::SExt;
6909 auto CreateRangeCheck = [&] {
6924 }
else if (!IsSExt || HasOneUse) {
6929 return CreateRangeCheck();
6931 }
else if (IsSExt ?
C->isAllOnes() :
C->isOne()) {
6939 }
else if (!IsSExt || HasOneUse) {
6944 return CreateRangeCheck();
6958 Instruction::ICmp, Pred1,
X,
6977 Value *Op0 =
I.getOperand(0);
6978 Value *Op1 =
I.getOperand(1);
6979 auto *Op1C = dyn_cast<Constant>(Op1);
6984 if (!FlippedStrictness)
6987 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7005 I.setName(
I.getName() +
".not");
7016 Value *
A =
I.getOperand(0), *
B =
I.getOperand(1);
7017 assert(
A->getType()->isIntOrIntVectorTy(1) &&
"Bools only");
7023 switch (
I.getPredicate()) {
7032 switch (
I.getPredicate()) {
7042 switch (
I.getPredicate()) {
7051 return BinaryOperator::CreateXor(
A,
B);
7059 return BinaryOperator::CreateAnd(Builder.
CreateNot(
A),
B);
7067 return BinaryOperator::CreateAnd(Builder.
CreateNot(
B),
A);
7075 return BinaryOperator::CreateOr(Builder.
CreateNot(
A),
B);
7083 return BinaryOperator::CreateOr(Builder.
CreateNot(
B),
A);
7139 Value *
LHS = Cmp.getOperand(0), *
RHS = Cmp.getOperand(1);
7144 if (
auto *
I = dyn_cast<Instruction>(V))
7145 I->copyIRFlags(&Cmp);
7146 Module *M = Cmp.getModule();
7148 M, Intrinsic::vector_reverse, V->getType());
7156 return createCmpReverse(Pred, V1, V2);
7160 return createCmpReverse(Pred, V1,
RHS);
7164 return createCmpReverse(Pred,
LHS, V2);
7189 Constant *ScalarC =
C->getSplatValue(
true);
7208 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7212 auto UAddOvResultPat = m_ExtractValue<0>(
7214 if (
match(Op0, UAddOvResultPat) &&
7223 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
7225 (Op0 ==
A || Op0 ==
B))
7227 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
7235 if (!
I.getOperand(0)->getType()->isPointerTy() ||
7237 I.getParent()->getParent(),
7238 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7244 Op->isLaunderOrStripInvariantGroup()) {
7246 Op->getOperand(0),
I.getOperand(1));
7258 if (
I.getType()->isVectorTy())
7280 auto *LHSTy = dyn_cast<FixedVectorType>(
LHS->
getType());
7281 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7284 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7286 if (!
DL.isLegalInteger(NumBits))
7290 auto *ScalarTy = Builder.
getIntNTy(NumBits);
7305 if (
auto *
GEP = dyn_cast<GEPOperator>(Op0))
7309 if (
auto *SI = dyn_cast<SelectInst>(Op0))
7313 if (
auto *
MinMax = dyn_cast<MinMaxIntrinsic>(Op0))
7344 bool IsIntMinPosion =
C->isAllOnesValue();
7356 CxtI, IsIntMinPosion
7359 X, ConstantInt::get(
X->getType(),
SMin + 1)));
7365 CxtI, IsIntMinPosion
7368 X, ConstantInt::get(
X->getType(),
SMin)));
7381 auto CheckUGT1 = [](
const APInt &Divisor) {
return Divisor.ugt(1); };
7396 auto CheckNE0 = [](
const APInt &Shift) {
return !Shift.isZero(); };
7414 bool Changed =
false;
7416 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7423 if (Op0Cplxity < Op1Cplxity) {
7438 if (
Value *V = dyn_castNegVal(SelectTrue)) {
7439 if (V == SelectFalse)
7441 }
else if (
Value *V = dyn_castNegVal(SelectFalse)) {
7442 if (V == SelectTrue)
7483 if (
SelectInst *SI = dyn_cast<SelectInst>(
I.user_back())) {
7541 if (
I.isCommutative()) {
7542 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
7566 (Op0->
hasOneUse() || Op1->hasOneUse())) {
7582 assert(Op1->getType()->isPointerTy() &&
7583 "Comparing pointer with non-pointer?");
7612 bool ConsumesOp0, ConsumesOp1;
7615 (ConsumesOp0 || ConsumesOp1)) {
7618 assert(InvOp0 && InvOp1 &&
7619 "Mismatch between isFreeToInvert and getFreelyInverted");
7620 return new ICmpInst(
I.getSwappedPredicate(), InvOp0, InvOp1);
7627 isa<IntegerType>(
X->getType())) {
7632 if (AddI->
getOpcode() == Instruction::Add &&
7633 OptimizeOverflowCheck(Instruction::Add,
false,
X,
Y, *AddI,
7634 Result, Overflow)) {
7652 if ((
I.isUnsigned() ||
I.isEquality()) &&
7655 Y->getType()->getScalarSizeInBits() == 1 &&
7656 (Op0->
hasOneUse() || Op1->hasOneUse())) {
7663 unsigned ShiftOpc = ShiftI->
getOpcode();
7664 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
7665 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
7694 if (
auto *EVI = dyn_cast<ExtractValueInst>(Op0))
7695 if (
auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
7696 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
7703 if (
I.getType()->isVectorTy())
7715 const APInt *C1, *C2;
7722 Type *InputTy =
A->getType();
7729 TruncC1.
setBit(InputBitWidth - 1);
7733 ConstantInt::get(InputTy, C2->
trunc(InputBitWidth)));
7739 return Changed ? &
I :
nullptr;
7753 if (MantissaWidth == -1)
7758 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
7760 if (
I.isEquality()) {
7762 bool IsExact =
false;
7763 APSInt RHSCvt(IntWidth, LHSUnsigned);
7772 if (*
RHS != RHSRoundInt) {
7792 if ((
int)IntWidth > MantissaWidth) {
7794 int Exp = ilogb(*
RHS);
7797 if (MaxExponent < (
int)IntWidth - !LHSUnsigned)
7803 if (MantissaWidth <= Exp && Exp <= (
int)IntWidth - !LHSUnsigned)
7812 assert(!
RHS->isNaN() &&
"NaN comparison not already folded!");
7815 switch (
I.getPredicate()) {
7906 APSInt RHSInt(IntWidth, LHSUnsigned);
7909 if (!
RHS->isZero()) {
7924 if (
RHS->isNegative())
7930 if (
RHS->isNegative())
7936 if (
RHS->isNegative())
7943 if (!
RHS->isNegative())
7949 if (
RHS->isNegative())
7955 if (
RHS->isNegative())
7961 if (
RHS->isNegative())
7968 if (!
RHS->isNegative())
8022 if (
C->isNegative())
8023 Pred =
I.getSwappedPredicate();
8038 if (!
C->isPosZero()) {
8039 if (!
C->isSmallestNormalized())
8052 switch (
I.getPredicate()) {
8078 switch (
I.getPredicate()) {
8103 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8108 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8122 return replacePredAndOp0(&
I,
I.getPredicate(),
X);
8144 if (!cast<Instruction>(
I.getOperand(0))->hasNoInfs())
8145 I.setHasNoInfs(
false);
8147 switch (
I.getPredicate()) {
8192 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8197 Pred =
I.getSwappedPredicate();
8206 return new FCmpInst(Pred, Op0, Zero,
"", &
I);
8243 I.getFunction()->getDenormalMode(
8265 if (!FloorX && !CeilX) {
8269 Pred =
I.getSwappedPredicate();
8326 bool Changed =
false;
8337 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8344 assert(OpType == Op1->getType() &&
"fcmp with different-typed operands?");
8369 if (
I.isCommutative()) {
8370 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
8392 return new FCmpInst(
I.getSwappedPredicate(),
X,
Y,
"", &
I);
8405 if (
SelectInst *SI = dyn_cast<SelectInst>(
I.user_back())) {
8474 Type *IntTy =
X->getType();
8486 case Instruction::Select:
8494 case Instruction::FSub:
8499 case Instruction::PHI:
8503 case Instruction::SIToFP:
8504 case Instruction::UIToFP:
8508 case Instruction::FDiv:
8512 case Instruction::Load:
8513 if (
auto *
GEP = dyn_cast<GetElementPtrInst>(LHSI->
getOperand(0)))
8514 if (
auto *GV = dyn_cast<GlobalVariable>(
GEP->getOperand(0)))
8516 cast<LoadInst>(LHSI),
GEP, GV,
I))
8536 return new FCmpInst(
I.getSwappedPredicate(),
X, NegC,
"", &
I);
8555 X->getType()->getScalarType()->getFltSemantics();
8591 Constant *NewC = ConstantFP::get(
X->getType(), TruncC);
8605 if (
auto *VecTy = dyn_cast<VectorType>(OpType))
8617 Value *CanonLHS =
nullptr, *CanonRHS =
nullptr;
8618 match(Op0, m_Intrinsic<Intrinsic::canonicalize>(
m_Value(CanonLHS)));
8619 match(Op1, m_Intrinsic<Intrinsic::canonicalize>(
m_Value(CanonRHS)));
8622 if (CanonLHS == Op1)
8623 return new FCmpInst(Pred, Op1, Op1,
"", &
I);
8626 if (CanonRHS == Op0)
8627 return new FCmpInst(Pred, Op0, Op0,
"", &
I);
8630 if (CanonLHS && CanonRHS)
8631 return new FCmpInst(Pred, CanonLHS, CanonRHS,
"", &
I);
8634 if (
I.getType()->isVectorTy())
8638 return Changed ? &
I :
nullptr;
AMDGPU Register Bank Select
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
mir Rename Register Operands
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metrics from passes.
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
FPClassTest classify() const
Return the FPClassTest which will return true for the value.
opStatus roundToIntegral(roundingMode RM)
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are both set.
bool eq(const APInt &RHS) const
Equality comparison.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void flipAllBits()
Toggle every bit to its opposite value.
unsigned countl_one() const
Count the number of leading one bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Class to represent array types.
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
BinaryOps getOpcode() const
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
bool isStrictPredicate() const
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
bool isIntPredicate() const
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getXor(Constant *C1, Constant *C2)
static Constant * getNeg(Constant *C, bool HasNSW=false)
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the limit value.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
ConstantRange truncate(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly smaller than the current type.
ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
bool isEmptySet() const
Return true if this set contains no members.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * getAllOnesValue(Type *Ty)
const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers, all equal, and the common value is returned.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
Type * getSmallestLegalIntType(LLVMContext &C, unsigned Width=0) const
Returns the smallest integer type with size at least as big as Width bits.
iterator find(const_arg_type_t< KeyT > Val)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Type * getSourceElementType() const
Value * getPointerOperand()
GEPNoWrapFlags getNoWrapFlags() const
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the program.
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Common base class shared among various IRBuilders.
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Value * CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcasted to NumElts elements.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Value * createIsFPClass(Value *FPNum, unsigned Test)
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth...
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u&lt; y and ((x * y) ?/ x) != y to @llvm.?mul.with.overflow(x, y) plus extraction of the overflow bit.
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns based on that limit.
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible, updating it in place.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than, equal to, or greater than RHS respectively.
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global variable with a constant initializer.
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known to be zero or one in the input.
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit test.
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s&gt;&gt; ShiftC) ^ X) u&lt; C --&gt; (X + C) u&lt; (C &lt;&lt; 1); ((X s&gt;&gt; ShiftC) ^ X) u&gt; (C - 1) --&gt; (X + C) u&gt; ((C &lt;&lt; 1) - 1)
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -&gt; (icmp eq/ne A, Log2(AP2/AP1)) -&gt; (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, unsigned Depth=0, const Instruction *CxtI=nullptr)
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V? InstCombine's freelyInvertAllUsersOf() must be kept in sync with this function.
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool contains(const key_type &key) const
Check if the SetVector contains the given key.
This instruction constructs a fixed permutation of two input vectors.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Class to represent struct types.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
int getFPMantissaWidth() const
Return the width of the mantissa of this type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
@ C
The default llvm calling convention, compatible with C.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
OneUse_match< T > m_OneUse(const T &SubPattern)
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
This is an optimization pass for GlobalISel generic memory operations.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
bool isKnownNeverInfinity(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e.
SelectPatternFlavor
Specific patterns of select instructions we can match.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
This callback is used in conjunction with PointerMayBeCaptured.
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isConstant() const
Returns true if we know the value of all bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutDomCondCache() const
A MapVector that performs no allocations if smaller than a certain size.