#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
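// Target-specific intrinsic headers: the analysis below has dedicated cases
// for AArch64 (irg/tagp), AMDGPU (make.buffer.rsrc), RISC-V (vsetvli) and
// X86 (pmulh/phadd/crc32) intrinsics.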
if (unsigned BitWidth = Ty->getScalarSizeInBits())
  return BitWidth;
return DL.getPointerTypeSizeInBits(Ty);
const APInt &DemandedElts,
DemandedLHS = DemandedRHS = DemandedElts;
DemandedElts, DemandedLHS, DemandedRHS);
bool UseInstrInfo, unsigned Depth) {
R->uge(LHS->getType()->getScalarSizeInBits()))
assert(LHS->getType() == RHS->getType() &&
       "LHS and RHS should have the same type");
assert(LHS->getType()->isIntOrIntVectorTy() &&
       "LHS and RHS should be integers");
return !I->user_empty() &&
return !I->user_empty() && all_of(I->users(), [](const User *U) {
  return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
return ::isKnownToBeAPowerOfTwo(
return CI->getValue().isStrictlyPositive();
return ::isKnownNonEqual(V1, V2, DemandedElts, Q, Depth);
return Mask.isSubsetOf(Known.Zero);
unsigned Depth = 0) {
return ::ComputeNumSignBits(
return V->getType()->getScalarSizeInBits() - SignBits + 1;
const APInt &DemandedElts,
if (KnownOut.isUnknown() && !NSW && !NUW)
bool NUW, const APInt &DemandedElts,
bool isKnownNegativeOp0 = Known2.isNegative();
    (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
    (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
    (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
bool SelfMultiply = Op0 == Op1;
unsigned OutValidBits = 2 * (TyBits - SignBits + 1);
if (OutValidBits < TyBits) {
  APInt KnownZeroMask =
  Known.Zero |= KnownZeroMask;
unsigned NumRanges = Ranges.getNumOperands() / 2;
for (unsigned i = 0; i < NumRanges; ++i) {
  "Known bit width must match range bit width!");
  unsigned CommonPrefixBits =
      (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
  Known.One &= UnsignedMax & Mask;
  Known.Zero &= ~UnsignedMax & Mask;
while (!WorkSet.empty()) {
  if (!Visited.insert(V).second)
  return EphValues.count(cast<Instruction>(U));
  if (V == I || (!V->mayHaveSideEffects() && !V->isTerminator())) {
    for (const Use &U : U->operands()) {
return CI->isAssumeLikeIntrinsic();
bool AllowEphemerals) {
if (!AllowEphemerals && Inv == CxtI)
for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
     Pred, VC->getElementAsAPInt(ElemIdx));
const PHINode **PhiOut = nullptr) {
CtxIOut = PHI->getIncomingBlock(*U)->getTerminator();
    IncPhi && IncPhi->getNumIncomingValues() == 2) {
  for (int Idx = 0; Idx < 2; ++Idx) {
    if (IncPhi->getIncomingValue(Idx) == PHI) {
      ValOut = IncPhi->getIncomingValue(1 - Idx);
      CtxIOut = IncPhi->getIncomingBlock(1 - Idx)->getTerminator();
       "Got assumption for the wrong function!");
if (!V->getType()->isPointerTy())
    *I, I->bundle_op_info_begin()[Elem.Index])) {
  (RK.AttrKind == Attribute::NonNull ||
   (RK.AttrKind == Attribute::Dereferenceable &&
if (RHS->getType()->isPointerTy()) {
Known.Zero |= ~*C & *Mask;
Known.One |= *C & ~*Mask;
Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
KnownBits DstKnown(LHS->getType()->getScalarSizeInBits());
bool Invert, unsigned Depth) {
       "Got assumption for the wrong function!");
if (!V->getType()->isPointerTy())
    *I, I->bundle_op_info_begin()[Elem.Index])) {
if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
Value *Arg = I->getArgOperand(0);
if (Trunc && Trunc->getOperand(0) == V &&
  if (Trunc->hasNoUnsignedWrap()) {
Known = KF(Known2, Known, ShAmtNonZero);
Value *X = nullptr, *Y = nullptr;
switch (I->getOpcode()) {
case Instruction::And:
  KnownOut = KnownLHS & KnownRHS;
  KnownOut = KnownLHS.blsi();
  KnownOut = KnownRHS.blsi();
case Instruction::Or:
  KnownOut = KnownLHS | KnownRHS;
case Instruction::Xor:
  KnownOut = KnownLHS ^ KnownRHS;
  const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
  KnownOut = XBits.blsmsk();
if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
APInt DemandedEltsLHS, DemandedEltsRHS;
    DemandedElts, DemandedEltsLHS,
const auto ComputeForSingleOpFunc =
  return KnownBitsFunc(
if (DemandedEltsRHS.isZero())
  return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS);
if (DemandedEltsLHS.isZero())
  return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS);
return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS)
    .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS));
APInt DemandedElts =
Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
return ConstantRange::getEmpty(BitWidth);
Value *Arm, bool Invert,
       "Input should be a Select!");
const Value *LHS2 = nullptr, *RHS2 = nullptr;
return CLow->sle(*CHigh);
const APInt *&CHigh) {
assert((II->getIntrinsicID() == Intrinsic::smin ||
        II->getIntrinsicID() == Intrinsic::smax) &&
       "Must be smin/smax");
if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
if (II->getIntrinsicID() == Intrinsic::smin)
  return CLow->sle(*CHigh);
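// Per-opcode known-bits propagation: each case below derives the known bits
// of I's result from the known bits of its operands; DemandedElts restricts
// which vector lanes actually matter.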
const APInt *CLow, *CHigh;
const APInt &DemandedElts,
switch (I->getOpcode()) {
case Instruction::Load:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Mul: {
      DemandedElts, Known, Known2, Q, Depth);
case Instruction::UDiv: {
case Instruction::SDiv: {
case Instruction::Select: {
  auto ComputeForArm = [&](Value *Arm, bool Invert) {
  ComputeForArm(I->getOperand(1), false)
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::ZExt:
case Instruction::Trunc: {
  Type *SrcTy = I->getOperand(0)->getType();
  unsigned SrcBitWidth;
  assert(SrcBitWidth && "SrcBitWidth can't be zero");
      Inst && Inst->hasNonNeg() && !Known.isNegative())
case Instruction::BitCast: {
  Type *SrcTy = I->getOperand(0)->getType();
  if (SrcTy->isIntOrPtrTy() &&
      !I->getType()->isVectorTy()) {
      V->getType()->isFPOrFPVectorTy()) {
    Type *FPType = V->getType()->getScalarType();
    if (FPClasses & fcInf)
    if (Result.SignBit) {
      if (*Result.SignBit)
  if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
      !I->getType()->isIntOrIntVectorTy() ||
  unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    unsigned SubScale = BitWidth / SubBitWidth;
    for (unsigned i = 0; i != NumElts; ++i) {
      if (DemandedElts[i])
        SubDemandedElts.setBit(i * SubScale);
    for (unsigned i = 0; i != SubScale; ++i) {
      unsigned ShiftElt = IsLE ? i : SubScale - 1 - i;
      Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
    unsigned SubScale = SubBitWidth / BitWidth;
    APInt SubDemandedElts =
    for (unsigned i = 0; i != NumElts; ++i) {
      if (DemandedElts[i]) {
        unsigned Shifts = IsLE ? i : NumElts - 1 - i;
case Instruction::SExt: {
  unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
  Known = Known.trunc(SrcBitWidth);
case Instruction::Shl: {
                  bool ShAmtNonZero) {
    return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
case Instruction::LShr: {
                  bool ShAmtNonZero) {
case Instruction::AShr: {
                  bool ShAmtNonZero) {
case Instruction::Sub: {
      DemandedElts, Known, Known2, Q, Depth);
case Instruction::Add: {
      DemandedElts, Known, Known2, Q, Depth);
case Instruction::SRem:
case Instruction::URem:
case Instruction::Alloca:
case Instruction::GetElementPtr: {
  APInt AccConstIndices(IndexWidth, 0);
  auto AddIndexToKnown = [&](KnownBits IndexBits) {
         "Index width can't be larger than pointer width");
  for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
    Value *Index = I->getOperand(i);
             "Access to structure field must be known at compile time");
      AccConstIndices += Offset;
          CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
case Instruction::PHI: {
  Value *R = nullptr, *L = nullptr;
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::UDiv:
    case Instruction::URem:
    case Instruction::AShr:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Mul: {
    unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
    Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
    Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();
    case Instruction::Add: {
    case Instruction::Sub: {
    case Instruction::Mul:
  if (P->getNumIncomingValues() == 0)
  for (const Use &U : P->operands()) {
    if ((TrueSucc == CxtPhi->getParent()) !=
    Known2 = KnownUnion;
case Instruction::Call:
case Instruction::Invoke: {
  if (std::optional<ConstantRange> Range = CB->getRange())
  if (const Value *RV = CB->getReturnedArgOperand()) {
    if (RV->getType() == I->getType()) {
  switch (II->getIntrinsicID()) {
  case Intrinsic::abs: {
    bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
  case Intrinsic::bitreverse:
  case Intrinsic::bswap:
  case Intrinsic::ctlz: {
    PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
  case Intrinsic::cttz: {
    PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
  case Intrinsic::ctpop: {
  case Intrinsic::fshr:
  case Intrinsic::fshl: {
    if (II->getIntrinsicID() == Intrinsic::fshr)
    Known2 <<= ShiftAmt;
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::vector_reverse:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_umax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_xor: {
    bool EvenCnt = VecTy->getElementCount().isKnownEven();
    if (VecTy->isScalableTy() || EvenCnt)
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::ptrmask: {
    const Value *Mask = I->getOperand(1);
    Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
  case Intrinsic::x86_sse2_pmulh_w:
  case Intrinsic::x86_avx2_pmulh_w:
  case Intrinsic::x86_avx512_pmulh_w_512:
  case Intrinsic::x86_sse2_pmulhu_w:
  case Intrinsic::x86_avx2_pmulhu_w:
  case Intrinsic::x86_avx512_pmulhu_w_512:
  case Intrinsic::x86_sse42_crc32_64_64:
  case Intrinsic::x86_ssse3_phadd_d_128:
  case Intrinsic::x86_ssse3_phadd_w_128:
  case Intrinsic::x86_avx2_phadd_d:
  case Intrinsic::x86_avx2_phadd_w: {
        I, DemandedElts, Q, Depth,
  case Intrinsic::x86_ssse3_phadd_sw_128:
  case Intrinsic::x86_avx2_phadd_sw: {
  case Intrinsic::x86_ssse3_phsub_d_128:
  case Intrinsic::x86_ssse3_phsub_w_128:
  case Intrinsic::x86_avx2_phsub_d:
  case Intrinsic::x86_avx2_phsub_w: {
        I, DemandedElts, Q, Depth,
  case Intrinsic::x86_ssse3_phsub_sw_128:
  case Intrinsic::x86_avx2_phsub_sw: {
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {
    bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
    MaxVL = std::min(MaxVL, CI->getZExtValue());
    unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
  case Intrinsic::vscale: {
    if (!II->getParent() || !II->getFunction())
case Instruction::ShuffleVector: {
  APInt DemandedLHS, DemandedRHS;
  if (!!DemandedLHS) {
    const Value *LHS = Shuf->getOperand(0);
  if (!!DemandedRHS) {
    const Value *RHS = Shuf->getOperand(1);
case Instruction::InsertElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Elt = I->getOperand(1);
  APInt DemandedVecElts = DemandedElts;
  bool NeedsElt = true;
  if (CIdx && CIdx->getValue().ult(NumElts)) {
    DemandedVecElts.clearBit(CIdx->getZExtValue());
    NeedsElt = DemandedElts[CIdx->getZExtValue()];
  if (!DemandedVecElts.isZero()) {
case Instruction::ExtractElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Idx = I->getOperand(1);
  if (CIdx && CIdx->getValue().ult(NumElts))
case Instruction::ExtractValue:
  switch (II->getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
        true, II->getArgOperand(0), II->getArgOperand(1), false,
        false, DemandedElts, Known, Known2, Q, Depth);
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
        false, II->getArgOperand(0), II->getArgOperand(1), false,
        false, DemandedElts, Known, Known2, Q, Depth);
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
        false, DemandedElts, Known, Known2, Q, Depth);
case Instruction::Freeze:
if (!DemandedElts) {
assert(V && "No Value?");
Type *Ty = V->getType();
assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
       "Not integer or pointer type!");
           FVTy->getNumElements() == DemandedElts.getBitWidth() &&
       "DemandedElt width should equal the fixed vector number of elements");
       "DemandedElt width should be 1 for scalars or scalable vectors");
       "V and Known should have same BitWidth");
       "V and Known should have same BitWidth");
for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
  if (!DemandedElts[i])
  APInt Elt = CDV->getElementAsAPInt(i);
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
  if (!DemandedElts[i])
  const APInt &Elt = ElementCI->getValue();
if (std::optional<ConstantRange> Range = A->getRange())
  Known = Range->toKnownBits();
if (!GA->isInterposable())
if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
  Known = CR->toKnownBits();
Align Alignment = V->getPointerAlignment(Q.DL);
Value *Start = nullptr, *Step = nullptr;
if (U.get() == Start) {
case Instruction::Mul:
case Instruction::SDiv:
case Instruction::UDiv:
case Instruction::Shl:
case Instruction::AShr:
case Instruction::LShr:
if (OrZero && V->getType()->getScalarSizeInBits() == 1)
  return F->hasFnAttribute(Attribute::VScaleRange);
switch (I->getOpcode()) {
case Instruction::ZExt:
case Instruction::Trunc:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::UDiv:
case Instruction::Mul:
case Instruction::And:
case Instruction::Add: {
  if (match(I->getOperand(0),
  if (match(I->getOperand(1),
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
case Instruction::Select:
case Instruction::PHI: {
  RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
  return isKnownToBeAPowerOfTwo(U.get(), OrZero, RecQ, NewDepth);
case Instruction::Invoke:
case Instruction::Call: {
  switch (II->getIntrinsicID()) {
  case Intrinsic::umax:
  case Intrinsic::smax:
  case Intrinsic::umin:
  case Intrinsic::smin:
  case Intrinsic::bitreverse:
  case Intrinsic::bswap:
  case Intrinsic::fshr:
  case Intrinsic::fshl:
    if (II->getArgOperand(0) == II->getArgOperand(1))
F = I->getFunction();
if (!GEP->hasNoUnsignedWrap() &&
    !(GEP->isInBounds() &&
assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
     GTI != GTE; ++GTI) {
  if (StructType *STy = GTI.getStructTypeOrNull()) {
    if (ElementOffset > 0)
  if (GTI.getSequentialElementStride(Q.DL).isZero())
unsigned NumUsesExplored = 0;
for (auto &U : V->uses()) {
  if (V->getType()->isPointerTy()) {
    if (CB->isArgOperand(&U) &&
        CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
      NonNullIfTrue = true;
      NonNullIfTrue = false;
  for (const auto *CmpU : UI->users()) {
    if (Visited.insert(CmpU).second)
  while (!WorkList.empty()) {
    for (const auto *CurrU : Curr->users())
      if (Visited.insert(CurrU).second)
      assert(BI->isConditional() && "uses a comparison!");
          BI->getSuccessor(NonNullIfTrue ? 0 : 1);
    } else if (NonNullIfTrue && isGuard(Curr) &&
const unsigned NumRanges = Ranges->getNumOperands() / 2;
for (unsigned i = 0; i < NumRanges; ++i) {
Value *Start = nullptr, *Step = nullptr;
const APInt *StartC, *StepC;
case Instruction::Add:
case Instruction::Mul:
case Instruction::Shl:
case Instruction::AShr:
case Instruction::LShr:
bool NUW, unsigned Depth) {
return ::isKnownNonEqual(X, Y, DemandedElts, Q, Depth);
bool NUW, unsigned Depth) {
auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.shl(Rhs);
  case Instruction::LShr:
    return Lhs.lshr(Rhs);
  case Instruction::AShr:
    return Lhs.ashr(Rhs);
auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.lshr(Rhs);
  case Instruction::LShr:
  case Instruction::AShr:
    return Lhs.shl(Rhs);
if (MaxShift.uge(NumBits))
if (!ShiftOp(KnownVal.One, MaxShift).isZero())
if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
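// Per-opcode non-zero analysis: each case below establishes why the
// instruction's result cannot be zero in the demanded vector lanes.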
const APInt &DemandedElts,
switch (I->getOpcode()) {
case Instruction::Alloca:
  return I->getType()->getPointerAddressSpace() == 0;
case Instruction::GetElementPtr:
  if (I->getType()->isPointerTy())
case Instruction::BitCast: {
  Type *FromTy = I->getOperand(0)->getType();
case Instruction::IntToPtr:
case Instruction::PtrToInt:
case Instruction::Trunc:
  if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
case Instruction::Xor:
case Instruction::Sub:
                       I->getOperand(1), Depth);
case Instruction::Or:
case Instruction::SExt:
case Instruction::ZExt:
case Instruction::Shl: {
case Instruction::LShr:
case Instruction::AShr: {
case Instruction::UDiv:
case Instruction::SDiv: {
  if (I->getOpcode() == Instruction::SDiv) {
    XKnown = XKnown.abs(false);
    YKnown = YKnown.abs(false);
  return XUgeY && *XUgeY;
case Instruction::Add: {
case Instruction::Mul: {
case Instruction::Select: {
  auto SelectArmIsNonZero = [&](bool IsTrueArm) {
    Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2);
  if (SelectArmIsNonZero(true) &&
      SelectArmIsNonZero(false))
case Instruction::PHI: {
  RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
  BasicBlock *TrueSucc, *FalseSucc;
  if (match(RecQ.CxtI,
            m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
                 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
    if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
      if (FalseSucc == PN->getParent())
        Pred = CmpInst::getInversePredicate(Pred);
      if (cmpExcludesZero(Pred, X))
case Instruction::InsertElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Elt = I->getOperand(1);
  APInt DemandedVecElts = DemandedElts;
  bool SkipElt = false;
  if (CIdx && CIdx->getValue().ult(NumElts)) {
    DemandedVecElts.clearBit(CIdx->getZExtValue());
    SkipElt = !DemandedElts[CIdx->getZExtValue()];
         (DemandedVecElts.isZero() ||
case Instruction::ExtractElement:
  const Value *Vec = EEI->getVectorOperand();
  const Value *Idx = EEI->getIndexOperand();
    unsigned NumElts = VecTy->getNumElements();
    if (CIdx && CIdx->getValue().ult(NumElts))
case Instruction::ShuffleVector: {
  APInt DemandedLHS, DemandedRHS;
  return (DemandedRHS.isZero() ||
case Instruction::Freeze:
case Instruction::Load: {
case Instruction::ExtractValue: {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
                           false, false, Depth);
case Instruction::Call:
case Instruction::Invoke: {
  if (I->getType()->isPointerTy()) {
    if (Call->isReturnNonNull())
    if (std::optional<ConstantRange> Range = Call->getRange()) {
      const APInt ZeroValue(Range->getBitWidth(), 0);
      if (!Range->contains(ZeroValue))
    if (const Value *RV = Call->getReturnedArgOperand())
    switch (II->getIntrinsicID()) {
    case Intrinsic::sshl_sat:
    case Intrinsic::ushl_sat:
    case Intrinsic::abs:
    case Intrinsic::bitreverse:
    case Intrinsic::bswap:
    case Intrinsic::ctpop:
    case Intrinsic::ssub_sat:
    case Intrinsic::sadd_sat:
                               II->getArgOperand(1),
                               true, false, Depth);
    case Intrinsic::vector_reverse:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::umax:
    case Intrinsic::uadd_sat:
    case Intrinsic::smax: {
      auto IsNonZero = [&](Value *Op, std::optional<bool> &OpNonZero,
        if (!OpNonZero.has_value())
          OpNonZero = OpKnown.isNonZero() ||
      std::optional<bool> Op0NonZero, Op1NonZero;
          IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known))
          IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known))
      return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) &&
             IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known);
    case Intrinsic::smin: {
    case Intrinsic::umin:
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
    case Intrinsic::fshr:
    case Intrinsic::fshl:
      if (II->getArgOperand(0) == II->getArgOperand(1))
    case Intrinsic::vscale:
    case Intrinsic::experimental_get_vector_length:
return Known.One != 0;
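// Entry point below: constants, globals and function arguments (via range,
// nonnull and byval attributes) are handled first, before dispatching on the
// defining instruction.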
Type *Ty = V->getType();
           FVTy->getNumElements() == DemandedElts.getBitWidth() &&
       "DemandedElt width should equal the fixed vector number of elements");
       "DemandedElt width should be 1 for scalars");
  if (C->isNullValue())
  for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
    if (!DemandedElts[i])
    Constant *Elt = C->getAggregateElement(i);
  if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
      GV->getType()->getAddressSpace() == 0)
  if (std::optional<ConstantRange> Range = A->getRange()) {
    const APInt ZeroValue(Range->getBitWidth(), 0);
    if (!Range->contains(ZeroValue))
  if (((A->hasPassPointeeByValueCopyAttr() &&
       A->hasNonNullAttr()))
APInt DemandedElts =
return ::isKnownNonZero(V, DemandedElts, Q, Depth);
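// If two instructions apply the same invertible operation (add/sub/mul/shl,
// exact shifts, sext/zext, or matching phi recurrences), their results can
// only be equal when the recovered operand pairs are equal; those operands
// are returned so the equality check can recurse.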
static std::optional<std::pair<Value *, Value *>>
  return std::nullopt;
case Instruction::Or:
case Instruction::Xor:
case Instruction::Add: {
case Instruction::Sub:
case Instruction::Mul: {
  if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
      (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
case Instruction::Shl: {
  if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
      (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
case Instruction::AShr:
case Instruction::LShr: {
  if (!PEO1->isExact() || !PEO2->isExact())
case Instruction::SExt:
case Instruction::ZExt:
case Instruction::PHI: {
  Value *Start1 = nullptr, *Step1 = nullptr;
  Value *Start2 = nullptr, *Step2 = nullptr;
  if (Values->first != PN1 || Values->second != PN2)
  return std::make_pair(Start1, Start2);
return std::nullopt;
const APInt &DemandedElts,
case Instruction::Or:
case Instruction::Xor:
case Instruction::Add:
      (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
      !C->isZero() && !C->isOne() &&
      (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
bool UsedFullRecursion = false;
  if (!VisitedBBs.insert(IncomBB).second)
  const APInt *C1, *C2;
  if (UsedFullRecursion)
  RecQ.CxtI = IncomBB->getTerminator();
  UsedFullRecursion = true;
const Value *Cond2 = SI2->getCondition();
                       DemandedElts, Q, Depth + 1) &&
                       DemandedElts, Q, Depth + 1);
if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin()))
if (!PN || PN->getNumIncomingValues() != 2)
Value *Start = nullptr;
if (PN->getIncomingValue(0) == Step)
  Start = PN->getIncomingValue(1);
else if (PN->getIncomingValue(1) == Step)
  Start = PN->getIncomingValue(0);
APInt StartOffset(IndexWidth, 0);
Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StartOffset);
APInt StepOffset(IndexWidth, 0);
APInt OffsetB(IndexWidth, 0);
B = B->stripAndAccumulateInBoundsConstantOffsets(Q.DL, OffsetB);
return Start == B &&
auto IsKnownNonEqualFromDominatingCondition = [&](const Value *V) {
if (IsKnownNonEqualFromDominatingCondition(V1) ||
    IsKnownNonEqualFromDominatingCondition(V2))
       "Got assumption for the wrong function!");
assert(I->getIntrinsicID() == Intrinsic::assume &&
       "must be an assume intrinsic");
if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
  return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q,
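// Sign-bit counting: the helpers below compute how many of the high bits are
// guaranteed to be copies of the sign bit, first for constant vectors and
// then per opcode of the defining instruction.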
const APInt &DemandedElts,
unsigned MinSignBits = TyBits;
for (unsigned i = 0; i != NumElts; ++i) {
  if (!DemandedElts[i])
  MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
const APInt &DemandedElts,
assert(Result > 0 && "At least one sign bit needs to be present!");
const APInt &DemandedElts,
Type *Ty = V->getType();
           FVTy->getNumElements() == DemandedElts.getBitWidth() &&
       "DemandedElt width should equal the fixed vector number of elements");
       "DemandedElt width should be 1 for scalars");
unsigned FirstAnswer = 1;
case Instruction::BitCast: {
  Value *Src = U->getOperand(0);
  Type *SrcTy = Src->getType();
  if (!SrcTy->isIntOrIntVectorTy())
  if ((SrcBits % TyBits) != 0)
case Instruction::SExt:
  Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
case Instruction::SDiv: {
  const APInt *Denominator;
    return std::min(TyBits, NumBits + Denominator->logBase2());
case Instruction::SRem: {
  const APInt *Denominator;
    unsigned ResBits = TyBits - Denominator->ceilLogBase2();
    Tmp = std::max(Tmp, ResBits);
case Instruction::AShr: {
  if (ShAmt->uge(TyBits))
  Tmp += ShAmtLimited;
  if (Tmp > TyBits) Tmp = TyBits;
case Instruction::Shl: {
  if (ShAmt->uge(TyBits))
      ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) {
    Tmp += TyBits - X->getType()->getScalarSizeInBits();
  if (ShAmt->uge(Tmp))
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
  FirstAnswer = std::min(Tmp, Tmp2);
case Instruction::Select: {
  const APInt *CLow, *CHigh;
  return std::min(Tmp, Tmp2);
case Instruction::Add:
  if (Tmp == 1) break;
  if (CRHS->isAllOnesValue()) {
    if ((Known.Zero | 1).isAllOnes())
  return std::min(Tmp, Tmp2) - 1;
case Instruction::Sub:
  if (CLHS->isNullValue()) {
    if ((Known.Zero | 1).isAllOnes())
  return std::min(Tmp, Tmp2) - 1;
case Instruction::Mul: {
  unsigned SignBitsOp0 =
  if (SignBitsOp0 == 1)
  unsigned SignBitsOp1 =
  if (SignBitsOp1 == 1)
  unsigned OutValidBits =
      (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
case Instruction::PHI: {
  if (NumIncomingValues > 4) break;
  if (NumIncomingValues == 0) break;
  for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
    if (Tmp == 1) return Tmp;
                                     DemandedElts, RecQ, Depth + 1));
case Instruction::Trunc: {
  unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
  if (Tmp > (OperandTyBits - TyBits))
    return Tmp - (OperandTyBits - TyBits);
case Instruction::ExtractElement:
case Instruction::ShuffleVector: {
  APInt DemandedLHS, DemandedRHS;
  Tmp = std::numeric_limits<unsigned>::max();
  if (!!DemandedLHS) {
    const Value *LHS = Shuf->getOperand(0);
  if (!!DemandedRHS) {
    const Value *RHS = Shuf->getOperand(1);
    Tmp = std::min(Tmp, Tmp2);
assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
4363 case Instruction::Call: {
4365 switch (
II->getIntrinsicID()) {
4368 case Intrinsic::abs:
4376 case Intrinsic::smin:
4377 case Intrinsic::smax: {
4378 const APInt *CLow, *CHigh;
4393 if (
unsigned VecSignBits =
4411 if (
F->isIntrinsic())
4412 return F->getIntrinsicID();
4418 if (
F->hasLocalLinkage() || !TLI || !TLI->
getLibFunc(CB, Func) ||
4428 return Intrinsic::sin;
4432 return Intrinsic::cos;
4436 return Intrinsic::tan;
4440 return Intrinsic::asin;
4444 return Intrinsic::acos;
4448 return Intrinsic::atan;
4450 case LibFunc_atan2f:
4451 case LibFunc_atan2l:
4452 return Intrinsic::atan2;
4456 return Intrinsic::sinh;
4460 return Intrinsic::cosh;
4464 return Intrinsic::tanh;
4468 return Intrinsic::exp;
4472 return Intrinsic::exp2;
4474 case LibFunc_exp10f:
4475 case LibFunc_exp10l:
4476 return Intrinsic::exp10;
4480 return Intrinsic::log;
4482 case LibFunc_log10f:
4483 case LibFunc_log10l:
4484 return Intrinsic::log10;
4488 return Intrinsic::log2;
4492 return Intrinsic::fabs;
4496 return Intrinsic::minnum;
4500 return Intrinsic::maxnum;
4501 case LibFunc_copysign:
4502 case LibFunc_copysignf:
4503 case LibFunc_copysignl:
4504 return Intrinsic::copysign;
4506 case LibFunc_floorf:
4507 case LibFunc_floorl:
4508 return Intrinsic::floor;
4512 return Intrinsic::ceil;
4514 case LibFunc_truncf:
4515 case LibFunc_truncl:
4516 return Intrinsic::trunc;
4520 return Intrinsic::rint;
4521 case LibFunc_nearbyint:
4522 case LibFunc_nearbyintf:
4523 case LibFunc_nearbyintl:
4524 return Intrinsic::nearbyint;
4526 case LibFunc_roundf:
4527 case LibFunc_roundl:
4528 return Intrinsic::round;
4529 case LibFunc_roundeven:
4530 case LibFunc_roundevenf:
4531 case LibFunc_roundevenl:
4532 return Intrinsic::roundeven;
4536 return Intrinsic::pow;
4540 return Intrinsic::sqrt;
Ty = Ty->getScalarType();
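// Classify an integer compare against a constant as a sign-bit test;
// TrueIfSigned reports whether the comparison is true exactly when the sign
// bit of the tested value is set.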
bool &TrueIfSigned) {
  TrueIfSigned = true;
  return RHS.isZero();
  TrueIfSigned = true;
  return RHS.isAllOnes();
  TrueIfSigned = false;
  return RHS.isAllOnes();
  TrueIfSigned = false;
  return RHS.isZero();
  TrueIfSigned = true;
  return RHS.isMaxSignedValue();
  TrueIfSigned = true;
  return RHS.isMinSignedValue();
  TrueIfSigned = false;
  return RHS.isMinSignedValue();
  TrueIfSigned = false;
  return RHS.isMaxSignedValue();
unsigned Depth = 0) {
KnownFromContext.knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
KnownFromContext.knownNot(CondIsTrue ? ~Mask : Mask);
if (TrueIfSigned == CondIsTrue)
return KnownFromContext;
4666 return KnownFromContext;
4676 "Got assumption for the wrong function!");
4677 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4678 "must be an assume intrinsic");
4684 true, Q.
CxtI, KnownFromContext);
4687 return KnownFromContext;
4698 APInt DemandedElts =
4704 const APInt &DemandedElts,
4709 if ((InterestedClasses &
4715 KnownSrc, Q,
Depth + 1);
4730 assert(Known.
isUnknown() &&
"should not be called with known information");
4732 if (!DemandedElts) {
4742 Known.
SignBit = CFP->isNegative();
4763 bool SignBitAllZero =
true;
4764 bool SignBitAllOne =
true;
4767 unsigned NumElts = VFVTy->getNumElements();
4768 for (
unsigned i = 0; i != NumElts; ++i) {
4769 if (!DemandedElts[i])
4785 const APFloat &
C = CElt->getValueAPF();
4788 SignBitAllZero =
false;
4790 SignBitAllOne =
false;
4792 if (SignBitAllOne != SignBitAllZero)
4793 Known.
SignBit = SignBitAllOne;
4799 KnownNotFromFlags |= CB->getRetNoFPClass();
4801 KnownNotFromFlags |= Arg->getNoFPClass();
4805 if (FPOp->hasNoNaNs())
4806 KnownNotFromFlags |=
fcNan;
4807 if (FPOp->hasNoInfs())
4808 KnownNotFromFlags |=
fcInf;
4812 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
4816 InterestedClasses &= ~KnownNotFromFlags;
4835 const unsigned Opc =
Op->getOpcode();
4837 case Instruction::FNeg: {
4839 Known, Q,
Depth + 1);
4843 case Instruction::Select: {
4851 Value *TestedValue =
nullptr;
4857 Value *CmpLHS, *CmpRHS;
4864 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
4865 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
4871 MaskIfTrue = TestedMask;
4872 MaskIfFalse = ~TestedMask;
4875 if (TestedValue ==
LHS) {
4877 FilterLHS = MaskIfTrue;
4878 }
else if (TestedValue ==
RHS) {
4880 FilterRHS = MaskIfFalse;
4889 Known2, Q,
Depth + 1);
4895 case Instruction::Call: {
4899 case Intrinsic::fabs: {
4904 InterestedClasses, Known, Q,
Depth + 1);
4910 case Intrinsic::copysign: {
4914 Known, Q,
Depth + 1);
4916 KnownSign, Q,
Depth + 1);
4920 case Intrinsic::fma:
4921 case Intrinsic::fmuladd: {
4925 if (
II->getArgOperand(0) !=
II->getArgOperand(1))
4934 KnownAddend, Q,
Depth + 1);
4940 case Intrinsic::sqrt:
4941 case Intrinsic::experimental_constrained_sqrt: {
4944 if (InterestedClasses &
fcNan)
4948 KnownSrc, Q,
Depth + 1);
4966 II->getType()->getScalarType()->getFltSemantics();
4975 case Intrinsic::sin:
4976 case Intrinsic::cos: {
4980 KnownSrc, Q,
Depth + 1);
4986 case Intrinsic::maxnum:
4987 case Intrinsic::minnum:
4988 case Intrinsic::minimum:
4989 case Intrinsic::maximum:
4990 case Intrinsic::minimumnum:
4991 case Intrinsic::maximumnum: {
4994 KnownLHS, Q,
Depth + 1);
4996 KnownRHS, Q,
Depth + 1);
4999 Known = KnownLHS | KnownRHS;
5003 (IID == Intrinsic::minnum || IID == Intrinsic::maxnum ||
5004 IID == Intrinsic::minimumnum || IID == Intrinsic::maximumnum))
5007 if (IID == Intrinsic::maxnum || IID == Intrinsic::maximumnum) {
5015 }
else if (IID == Intrinsic::maximum) {
5021 }
else if (IID == Intrinsic::minnum || IID == Intrinsic::minimumnum) {
5029 }
else if (IID == Intrinsic::minimum) {
5052 II->getType()->getScalarType()->getFltSemantics());
5064 }
else if ((IID == Intrinsic::maximum || IID == Intrinsic::minimum ||
5065 IID == Intrinsic::maximumnum ||
5066 IID == Intrinsic::minimumnum) ||
5074 KnownLHS.
SignBit = std::nullopt;
5076 KnownRHS.
SignBit = std::nullopt;
5077 if ((IID == Intrinsic::maximum || IID == Intrinsic::maximumnum ||
5078 IID == Intrinsic::maxnum) &&
5081 else if ((IID == Intrinsic::minimum || IID == Intrinsic::minimumnum ||
5082 IID == Intrinsic::minnum) &&
5089 case Intrinsic::canonicalize: {
5092 KnownSrc, Q,
Depth + 1);
5116 II->getType()->getScalarType()->getFltSemantics();
5136 case Intrinsic::vector_reduce_fmax:
5137 case Intrinsic::vector_reduce_fmin:
5138 case Intrinsic::vector_reduce_fmaximum:
5139 case Intrinsic::vector_reduce_fminimum: {
5143 InterestedClasses, Q,
Depth + 1);
5150 case Intrinsic::vector_reverse:
5153 II->getFastMathFlags(), InterestedClasses, Q,
Depth + 1);
5155 case Intrinsic::trunc:
5156 case Intrinsic::floor:
5157 case Intrinsic::ceil:
5158 case Intrinsic::rint:
5159 case Intrinsic::nearbyint:
5160 case Intrinsic::round:
5161 case Intrinsic::roundeven: {
5169 KnownSrc, Q,
Depth + 1);
5178 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
5193 case Intrinsic::exp:
5194 case Intrinsic::exp2:
5195 case Intrinsic::exp10: {
5202 KnownSrc, Q,
Depth + 1);
5210 case Intrinsic::fptrunc_round: {
5215 case Intrinsic::log:
5216 case Intrinsic::log10:
5217 case Intrinsic::log2:
5218 case Intrinsic::experimental_constrained_log:
5219 case Intrinsic::experimental_constrained_log10:
5220 case Intrinsic::experimental_constrained_log2: {
5236 KnownSrc, Q,
Depth + 1);
5250 II->getType()->getScalarType()->getFltSemantics();
5258 case Intrinsic::powi: {
5262 const Value *Exp =
II->getArgOperand(1);
5263 Type *ExpTy = Exp->getType();
5267 ExponentKnownBits, Q,
Depth + 1);
5269 if (ExponentKnownBits.
Zero[0]) {
5284 KnownSrc, Q,
Depth + 1);
5289 case Intrinsic::ldexp: {
5292 KnownSrc, Q,
Depth + 1);
5308 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
5314 II->getType()->getScalarType()->getFltSemantics();
5316 const Value *ExpArg =
II->getArgOperand(1);
5320 const int MantissaBits = Precision - 1;
5327 II->getType()->getScalarType()->getFltSemantics();
5328 if (ConstVal && ConstVal->
isZero()) {
5353 case Intrinsic::arithmetic_fence: {
5355 Known, Q,
Depth + 1);
5358 case Intrinsic::experimental_constrained_sitofp:
5359 case Intrinsic::experimental_constrained_uitofp:
5369 if (IID == Intrinsic::experimental_constrained_uitofp)
5380 case Instruction::FAdd:
5381 case Instruction::FSub: {
5384 Op->getOpcode() == Instruction::FAdd &&
5386 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
5389 if (!WantNaN && !WantNegative && !WantNegZero)
5395 if (InterestedClasses &
fcNan)
5396 InterestedSrcs |=
fcInf;
5398 KnownRHS, Q,
Depth + 1);
5402 WantNegZero ||
Opc == Instruction::FSub) {
5407 KnownLHS, Q,
Depth + 1);
5417 if (
Op->getOpcode() == Instruction::FAdd) {
5425 Op->getType()->getScalarType()->getFltSemantics();
5439 Op->getType()->getScalarType()->getFltSemantics();
5453 case Instruction::FMul: {
5455 if (
Op->getOperand(0) ==
Op->getOperand(1))
5492 Type *OpTy =
Op->getType()->getScalarType();
5504 case Instruction::FDiv:
5505 case Instruction::FRem: {
5506 if (
Op->getOperand(0) ==
Op->getOperand(1)) {
5508 if (
Op->getOpcode() == Instruction::FDiv) {
5519 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
5521 const bool WantPositive =
5523 if (!WantNan && !WantNegative && !WantPositive)
5532 bool KnowSomethingUseful =
5535 if (KnowSomethingUseful || WantPositive) {
5541 InterestedClasses & InterestedLHS, KnownLHS, Q,
5547 Op->getType()->getScalarType()->getFltSemantics();
5549 if (
Op->getOpcode() == Instruction::FDiv) {
5588 case Instruction::FPExt: {
5591 Known, Q,
Depth + 1);
5594 Op->getType()->getScalarType()->getFltSemantics();
5596 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5612 case Instruction::FPTrunc: {
5617 case Instruction::SIToFP:
5618 case Instruction::UIToFP: {
5627 if (
Op->getOpcode() == Instruction::UIToFP)
5630 if (InterestedClasses &
fcInf) {
5634 int IntSize =
Op->getOperand(0)->getType()->getScalarSizeInBits();
5635 if (
Op->getOpcode() == Instruction::SIToFP)
5640 Type *FPTy =
Op->getType()->getScalarType();
5647 case Instruction::ExtractElement: {
5650 const Value *Vec =
Op->getOperand(0);
5652 APInt DemandedVecElts;
5654 unsigned NumElts = VecTy->getNumElements();
5657 if (CIdx && CIdx->getValue().ult(NumElts))
5660 DemandedVecElts =
APInt(1, 1);
5666 case Instruction::InsertElement: {
5670 const Value *Vec =
Op->getOperand(0);
5671 const Value *Elt =
Op->getOperand(1);
5674 APInt DemandedVecElts = DemandedElts;
5675 bool NeedsElt =
true;
5677 if (CIdx && CIdx->getValue().ult(NumElts)) {
5678 DemandedVecElts.
clearBit(CIdx->getZExtValue());
5679 NeedsElt = DemandedElts[CIdx->getZExtValue()];
5693 if (!DemandedVecElts.
isZero()) {
5702 case Instruction::ShuffleVector: {
5705 APInt DemandedLHS, DemandedRHS;
5710 if (!!DemandedLHS) {
5711 const Value *
LHS = Shuf->getOperand(0);
5722 if (!!DemandedRHS) {
5724 const Value *
RHS = Shuf->getOperand(1);
5732 case Instruction::ExtractValue: {
5739 switch (
II->getIntrinsicID()) {
5740 case Intrinsic::frexp: {
5745 InterestedClasses, KnownSrc, Q,
Depth + 1);
5749 Op->getType()->getScalarType()->getFltSemantics();
5784 case Instruction::PHI: {
5787 if (
P->getNumIncomingValues() == 0)
5794 if (
Depth < PhiRecursionLimit) {
5801 for (
const Use &U :
P->operands()) {
5831 case Instruction::BitCast: {
5834 !Src->getType()->isIntOrIntVectorTy())
5837 const Type *Ty =
Op->getType()->getScalarType();
5838 KnownBits Bits(Ty->getScalarSizeInBits());
5842 if (Bits.isNonNegative())
5844 else if (Bits.isNegative())
5847 if (Ty->isIEEELikeFPTy()) {
5857 else if (!
APFloat(Ty->getFltSemantics(), ~Bits.Zero).
isNaN())
5864 InfKB.Zero.clearSignBit();
5866 assert(!InfResult.value());
5868 }
else if (Bits == InfKB) {
5876 ZeroKB.Zero.clearSignBit();
5878 assert(!ZeroResult.value());
5880 }
else if (Bits == ZeroKB) {
5893 const APInt &DemandedElts,
5900 return KnownClasses;
5926 InterestedClasses &=
~fcNan;
5928 InterestedClasses &=
~fcInf;
5934 Result.KnownFPClasses &=
~fcNan;
5936 Result.KnownFPClasses &=
~fcInf;
5945 APInt DemandedElts =
5999 if (FPOp->hasNoSignedZeros())
6003 switch (
User->getOpcode()) {
6004 case Instruction::FPToSI:
6005 case Instruction::FPToUI:
6007 case Instruction::FCmp:
6010 case Instruction::Call:
6012 switch (
II->getIntrinsicID()) {
6013 case Intrinsic::fabs:
6015 case Intrinsic::copysign:
6016 return U.getOperandNo() == 0;
6017 case Intrinsic::is_fpclass:
6018 case Intrinsic::vp_is_fpclass: {
6038 if (FPOp->hasNoNaNs())
6042 switch (
User->getOpcode()) {
6043 case Instruction::FPToSI:
6044 case Instruction::FPToUI:
6047 case Instruction::FAdd:
6048 case Instruction::FSub:
6049 case Instruction::FMul:
6050 case Instruction::FDiv:
6051 case Instruction::FRem:
6052 case Instruction::FPTrunc:
6053 case Instruction::FPExt:
6054 case Instruction::FCmp:
6057 case Instruction::FNeg:
6058 case Instruction::Select:
6059 case Instruction::PHI:
6061 case Instruction::Ret:
6062 return User->getFunction()->getAttributes().getRetNoFPClass() &
6064 case Instruction::Call:
6065 case Instruction::Invoke: {
6067 switch (
II->getIntrinsicID()) {
6068 case Intrinsic::fabs:
6070 case Intrinsic::copysign:
6071 return U.getOperandNo() == 0;
6073 case Intrinsic::maxnum:
6074 case Intrinsic::minnum:
6075 case Intrinsic::maximum:
6076 case Intrinsic::minimum:
6077 case Intrinsic::maximumnum:
6078 case Intrinsic::minimumnum:
6079 case Intrinsic::canonicalize:
6080 case Intrinsic::fma:
6081 case Intrinsic::fmuladd:
6082 case Intrinsic::sqrt:
6083 case Intrinsic::pow:
6084 case Intrinsic::powi:
6085 case Intrinsic::fptoui_sat:
6086 case Intrinsic::fptosi_sat:
6087 case Intrinsic::is_fpclass:
6088 case Intrinsic::vp_is_fpclass:
6107 if (V->getType()->isIntegerTy(8))
6118 if (
DL.getTypeStoreSize(V->getType()).isZero())
6133 if (
C->isNullValue())
6140 if (CFP->getType()->isHalfTy())
6142 else if (CFP->getType()->isFloatTy())
6144 else if (CFP->getType()->isDoubleTy())
6153 if (CI->getBitWidth() % 8 == 0) {
6154 assert(CI->getBitWidth() > 8 &&
"8 bits should be handled above!");
6155 if (!CI->getValue().isSplat(8))
6157 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
6162 if (CE->getOpcode() == Instruction::IntToPtr) {
6164 unsigned BitWidth =
DL.getPointerSizeInBits(PtrTy->getAddressSpace());
6177 if (LHS == UndefInt8)
6179 if (RHS == UndefInt8)
6185 Value *Val = UndefInt8;
6186 for (
uint64_t I = 0, E = CA->getNumElements();
I != E; ++
I)
6193 Value *Val = UndefInt8;
6228 while (PrevTo != OrigTo) {
6275 unsigned IdxSkip = Idxs.
size();
6288 std::optional<BasicBlock::iterator> InsertBefore) {
6291 if (idx_range.
empty())
6294 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6295 "Not looking at a struct or array?");
6297 "Invalid indices for type?");
6300 C =
C->getAggregateElement(idx_range[0]);
6301 if (!
C)
return nullptr;
6308 const unsigned *req_idx = idx_range.
begin();
6309 for (
const unsigned *i =
I->idx_begin(), *e =
I->idx_end();
6310 i != e; ++i, ++req_idx) {
6311 if (req_idx == idx_range.
end()) {
6341 ArrayRef(req_idx, idx_range.
end()), InsertBefore);
6350 unsigned size =
I->getNumIndices() + idx_range.
size();
6355 Idxs.
append(
I->idx_begin(),
I->idx_end());
6361 &&
"Number of indices added not correct?");
6378 assert(V &&
"V should not be null.");
6379 assert((ElementSize % 8) == 0 &&
6380 "ElementSize expected to be a multiple of the size of a byte.");
6381 unsigned ElementSizeInBytes = ElementSize / 8;
6393 APInt Off(
DL.getIndexTypeSizeInBits(V->getType()), 0);
6400 uint64_t StartIdx = Off.getLimitedValue();
6407 if ((StartIdx % ElementSizeInBytes) != 0)
6410 Offset += StartIdx / ElementSizeInBytes;
6416 uint64_t SizeInBytes =
DL.getTypeStoreSize(GVTy).getFixedValue();
6419 Slice.Array =
nullptr;
6431 Type *InitElTy = ArrayInit->getElementType();
6436 ArrayTy = ArrayInit->getType();
6441 if (ElementSize != 8)
6460 Slice.Array = Array;
6462 Slice.Length = NumElts -
Offset;
6476 if (Slice.Array ==
nullptr) {
6487 if (Slice.Length == 1) {
6499 Str = Str.
substr(Slice.Offset);
6505 Str = Str.substr(0, Str.find(
'\0'));
6518 unsigned CharSize) {
6520 V = V->stripPointerCasts();
6525 if (!PHIs.
insert(PN).second)
6530 for (
Value *IncValue : PN->incoming_values()) {
6532 if (Len == 0)
return 0;
6534 if (Len == ~0ULL)
continue;
6536 if (Len != LenSoFar && LenSoFar != ~0ULL)
6548 if (Len1 == 0)
return 0;
6550 if (Len2 == 0)
return 0;
6551 if (Len1 == ~0ULL)
return Len2;
6552 if (Len2 == ~0ULL)
return Len1;
6553 if (Len1 != Len2)
return 0;
if (Slice.Array == nullptr)
unsigned NullIndex = 0;
for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
return NullIndex + 1;
if (!V->getType()->isPointerTy())
return Len == ~0ULL ? 1 : Len;
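// Calls that return one of their pointer arguments (or intrinsics such as
// launder/strip.invariant.group and ptrmask below) can be looked through when
// walking to the underlying object, subject to MustPreserveNullness.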
bool MustPreserveNullness) {
       "getArgumentAliasingToReturnedPointer only works on nonnull calls");
if (const Value *RV = Call->getReturnedArgOperand())
        Call, MustPreserveNullness))
  return Call->getArgOperand(0);
switch (Call->getIntrinsicID()) {
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::aarch64_irg:
case Intrinsic::aarch64_tagp:
case Intrinsic::amdgcn_make_buffer_rsrc:
case Intrinsic::ptrmask:
  return !MustPreserveNullness;
case Intrinsic::threadlocal_address:
  return !Call->getParent()->getParent()->isPresplitCoroutine();
if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
if (!L->isLoopInvariant(Load->getPointerOperand()))
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
  const Value *PtrOp = GEP->getPointerOperand();
  if (GA->isInterposable())
  V = GA->getAliasee();
  if (PHI->getNumIncomingValues() == 1) {
    V = PHI->getIncomingValue(0);
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
const LoopInfo *LI, unsigned MaxLookup) {
  if (!Visited.insert(P).second)
} while (!Worklist.empty());
const unsigned MaxVisited = 8;
const Value *Object = nullptr;
  if (!Visited.insert(P).second)
  if (Visited.size() == MaxVisited)
  else if (Object != P)
} while (!Worklist.empty());
return Object ? Object : FirstObject;
6800 if (U->getOpcode() == Instruction::PtrToInt)
6801 return U->getOperand(0);
6808 if (U->getOpcode() != Instruction::Add ||
6813 V = U->getOperand(0);
6817 assert(V->getType()->isIntegerTy() &&
"Unexpected operand type!");
6834 for (
const Value *V : Objs) {
6835 if (!Visited.
insert(V).second)
6840 if (O->getType()->isPointerTy()) {
6853 }
while (!Working.
empty());
6862 auto AddWork = [&](
Value *V) {
6863 if (Visited.
insert(V).second)
6873 if (Result && Result != AI)
6877 AddWork(CI->getOperand(0));
6879 for (
Value *IncValue : PN->incoming_values())
6882 AddWork(
SI->getTrueValue());
6883 AddWork(
SI->getFalseValue());
6885 if (OffsetZero && !
GEP->hasAllZeroIndices())
6887 AddWork(
GEP->getPointerOperand());
6889 Value *Returned = CB->getReturnedArgOperand();
6897 }
while (!Worklist.
empty());
6903 const Value *V,
bool AllowLifetime,
bool AllowDroppable) {
6909 if (AllowLifetime &&
II->isLifetimeStartOrEnd())
6912 if (AllowDroppable &&
II->isDroppable())
6933 return (!Shuffle || Shuffle->isSelect()) &&
6940 bool IgnoreUBImplyingAttrs) {
6942 AC, DT, TLI, UseVariableInfo,
6943 IgnoreUBImplyingAttrs);
6949 bool UseVariableInfo,
bool IgnoreUBImplyingAttrs) {
6953 auto hasEqualReturnAndLeadingOperandTypes =
6954 [](
const Instruction *Inst,
unsigned NumLeadingOperands) {
6958 for (
unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6964 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6966 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6973 case Instruction::UDiv:
6974 case Instruction::URem: {
6981 case Instruction::SDiv:
6982 case Instruction::SRem: {
6984 const APInt *Numerator, *Denominator;
6988 if (*Denominator == 0)
7000 case Instruction::Load: {
7001 if (!UseVariableInfo)
7014 case Instruction::Call: {
7018 const Function *Callee = CI->getCalledFunction();
7022 if (!Callee || !Callee->isSpeculatable())
7026 return IgnoreUBImplyingAttrs || !CI->hasUBImplyingAttrs();
7028 case Instruction::VAArg:
7029 case Instruction::Alloca:
7030 case Instruction::Invoke:
7031 case Instruction::CallBr:
7032 case Instruction::PHI:
7033 case Instruction::Store:
7034 case Instruction::Ret:
7035 case Instruction::Br:
7036 case Instruction::IndirectBr:
7037 case Instruction::Switch:
7038 case Instruction::Unreachable:
7039 case Instruction::Fence:
7040 case Instruction::AtomicRMW:
7041 case Instruction::AtomicCmpXchg:
7042 case Instruction::LandingPad:
7043 case Instruction::Resume:
7044 case Instruction::CatchSwitch:
7045 case Instruction::CatchPad:
7046 case Instruction::CatchRet:
7047 case Instruction::CleanupPad:
7048 case Instruction::CleanupRet:
7054 if (
I.mayReadOrWriteMemory())
7122 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
7167 if (
Add &&
Add->hasNoSignedWrap()) {
7206 bool LHSOrRHSKnownNonNegative =
7208 bool LHSOrRHSKnownNegative =
7210 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
7213 if ((AddKnown.
isNonNegative() && LHSOrRHSKnownNonNegative) ||
7214 (AddKnown.
isNegative() && LHSOrRHSKnownNegative))
7289 assert(EVI->getNumIndices() == 1 &&
"Obvious from CI's type");
7291 if (EVI->getIndices()[0] == 0)
7294 assert(EVI->getIndices()[0] == 1 &&
"Obvious from CI's type");
7296 for (
const auto *U : EVI->users())
7298 assert(
B->isConditional() &&
"How else is it using an i1?");
7309 auto AllUsesGuardedByBranch = [&](
const BranchInst *BI) {
7315 for (
const auto *Result :
Results) {
7318 if (DT.
dominates(NoWrapEdge, Result->getParent()))
7321 for (
const auto &RU : Result->uses())
7329 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7341 unsigned NumElts = FVTy->getNumElements();
7342 for (
unsigned i = 0; i < NumElts; ++i)
7343 ShiftAmounts.
push_back(
C->getAggregateElement(i));
7351 return CI && CI->getValue().ult(
C->getType()->getIntegerBitWidth());
7372 bool ConsiderFlagsAndMetadata) {
7375 Op->hasPoisonGeneratingAnnotations())
7378 unsigned Opcode =
Op->getOpcode();
7382 case Instruction::Shl:
7383 case Instruction::AShr:
7384 case Instruction::LShr:
7386 case Instruction::FPToSI:
7387 case Instruction::FPToUI:
7391 case Instruction::Call:
7393 switch (
II->getIntrinsicID()) {
7395 case Intrinsic::ctlz:
7396 case Intrinsic::cttz:
7397 case Intrinsic::abs:
7401 case Intrinsic::ctpop:
7402 case Intrinsic::bswap:
7403 case Intrinsic::bitreverse:
7404 case Intrinsic::fshl:
7405 case Intrinsic::fshr:
7406 case Intrinsic::smax:
7407 case Intrinsic::smin:
7408 case Intrinsic::scmp:
7409 case Intrinsic::umax:
7410 case Intrinsic::umin:
7411 case Intrinsic::ucmp:
7412 case Intrinsic::ptrmask:
7413 case Intrinsic::fptoui_sat:
7414 case Intrinsic::fptosi_sat:
7415 case Intrinsic::sadd_with_overflow:
7416 case Intrinsic::ssub_with_overflow:
7417 case Intrinsic::smul_with_overflow:
7418 case Intrinsic::uadd_with_overflow:
7419 case Intrinsic::usub_with_overflow:
7420 case Intrinsic::umul_with_overflow:
7421 case Intrinsic::sadd_sat:
7422 case Intrinsic::uadd_sat:
7423 case Intrinsic::ssub_sat:
7424 case Intrinsic::usub_sat:
7426 case Intrinsic::sshl_sat:
7427 case Intrinsic::ushl_sat:
7430 case Intrinsic::fma:
7431 case Intrinsic::fmuladd:
7432 case Intrinsic::sqrt:
7433 case Intrinsic::powi:
7434 case Intrinsic::sin:
7435 case Intrinsic::cos:
7436 case Intrinsic::pow:
7437 case Intrinsic::log:
7438 case Intrinsic::log10:
7439 case Intrinsic::log2:
7440 case Intrinsic::exp:
7441 case Intrinsic::exp2:
7442 case Intrinsic::exp10:
7443 case Intrinsic::fabs:
7444 case Intrinsic::copysign:
7445 case Intrinsic::floor:
7446 case Intrinsic::ceil:
7447 case Intrinsic::trunc:
7448 case Intrinsic::rint:
7449 case Intrinsic::nearbyint:
7450 case Intrinsic::round:
7451 case Intrinsic::roundeven:
7452 case Intrinsic::fptrunc_round:
7453 case Intrinsic::canonicalize:
7454 case Intrinsic::arithmetic_fence:
7455 case Intrinsic::minnum:
7456 case Intrinsic::maxnum:
7457 case Intrinsic::minimum:
7458 case Intrinsic::maximum:
7459 case Intrinsic::minimumnum:
7460 case Intrinsic::maximumnum:
7461 case Intrinsic::is_fpclass:
7462 case Intrinsic::ldexp:
7463 case Intrinsic::frexp:
7465 case Intrinsic::lround:
7466 case Intrinsic::llround:
7467 case Intrinsic::lrint:
7468 case Intrinsic::llrint:
7475 case Instruction::CallBr:
7476 case Instruction::Invoke: {
7478 return !CB->hasRetAttr(Attribute::NoUndef);
7480 case Instruction::InsertElement:
7481 case Instruction::ExtractElement: {
7484 unsigned IdxOp =
Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7488 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7491 case Instruction::ShuffleVector: {
7497 case Instruction::FNeg:
7498 case Instruction::PHI:
7499 case Instruction::Select:
7500 case Instruction::ExtractValue:
7501 case Instruction::InsertValue:
7502 case Instruction::Freeze:
7503 case Instruction::ICmp:
7504 case Instruction::FCmp:
7505 case Instruction::GetElementPtr:
7507 case Instruction::AddrSpaceCast:
7522 bool ConsiderFlagsAndMetadata) {
7524 ConsiderFlagsAndMetadata);
7529 ConsiderFlagsAndMetadata);
7534 if (ValAssumedPoison == V)
7537 const unsigned MaxDepth = 2;
7538 if (
Depth >= MaxDepth)
7543 return propagatesPoison(Op) &&
7544 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7568 const unsigned MaxDepth = 2;
7569 if (
Depth >= MaxDepth)
7575 return impliesPoison(Op, V, Depth + 1);
7582 return ::impliesPoison(ValAssumedPoison, V, 0);
7597 if (
A->hasAttribute(Attribute::NoUndef) ||
7598 A->hasAttribute(Attribute::Dereferenceable) ||
7599 A->hasAttribute(Attribute::DereferenceableOrNull))
7614 if (
C->getType()->isVectorTy()) {
7617 if (
Constant *SplatC =
C->getSplatValue())
7625 return !
C->containsConstantExpression();
7638 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7643 auto OpCheck = [&](
const Value *V) {
7654 if (CB->hasRetAttr(Attribute::NoUndef) ||
7655 CB->hasRetAttr(Attribute::Dereferenceable) ||
7656 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7661 unsigned Num = PN->getNumIncomingValues();
7662 bool IsWellDefined =
true;
7663 for (
unsigned i = 0; i < Num; ++i) {
7664 if (PN == PN->getIncomingValue(i))
7666 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7668 DT,
Depth + 1, Kind)) {
7669 IsWellDefined =
false;
7677 all_of(Opr->operands(), OpCheck))
7682 if (
I->hasMetadata(LLVMContext::MD_noundef) ||
7683 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7684 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7704 auto *Dominator = DNode->
getIDom();
7709 auto *TI = Dominator->getBlock()->getTerminator();
7713 if (BI->isConditional())
7714 Cond = BI->getCondition();
7716 Cond =
SI->getCondition();
7725 if (
any_of(Opr->operands(), [V](
const Use &U) {
7726 return V == U && propagatesPoison(U);
7732 Dominator = Dominator->getIDom();
7745 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7752 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7759 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7783 while (!Worklist.
empty()) {
7792 if (
I != Root && !
any_of(
I->operands(), [&KnownPoison](
const Use &U) {
7793 return KnownPoison.contains(U) && propagatesPoison(U);
7797 if (KnownPoison.
insert(
I).second)
7809 return ::computeOverflowForSignedAdd(
Add->getOperand(0),
Add->getOperand(1),
7817 return ::computeOverflowForSignedAdd(LHS, RHS,
nullptr, SQ);
7849 return !
I->mayThrow() &&
I->willReturn();
7863 unsigned ScanLimit) {
7870 assert(ScanLimit &&
"scan limit must be non-zero");
7872 if (--ScanLimit == 0)
7886 if (
I->getParent() != L->getHeader())
return false;
7889 if (&LI ==
I)
return true;
7892 llvm_unreachable(
"Instruction not contained in its own parent basic block.");
7898 case Intrinsic::sadd_with_overflow:
7899 case Intrinsic::ssub_with_overflow:
7900 case Intrinsic::smul_with_overflow:
7901 case Intrinsic::uadd_with_overflow:
7902 case Intrinsic::usub_with_overflow:
7903 case Intrinsic::umul_with_overflow:
7908 case Intrinsic::ctpop:
7909 case Intrinsic::ctlz:
7910 case Intrinsic::cttz:
7911 case Intrinsic::abs:
7912 case Intrinsic::smax:
7913 case Intrinsic::smin:
7914 case Intrinsic::umax:
7915 case Intrinsic::umin:
7916 case Intrinsic::scmp:
7917 case Intrinsic::is_fpclass:
7918 case Intrinsic::ptrmask:
7919 case Intrinsic::ucmp:
7920 case Intrinsic::bitreverse:
7921 case Intrinsic::bswap:
7922 case Intrinsic::sadd_sat:
7923 case Intrinsic::ssub_sat:
7924 case Intrinsic::sshl_sat:
7925 case Intrinsic::uadd_sat:
7926 case Intrinsic::usub_sat:
7927 case Intrinsic::ushl_sat:
7928 case Intrinsic::smul_fix:
7929 case Intrinsic::smul_fix_sat:
7930 case Intrinsic::umul_fix:
7931 case Intrinsic::umul_fix_sat:
7932 case Intrinsic::pow:
7933 case Intrinsic::powi:
7934 case Intrinsic::sin:
7935 case Intrinsic::sinh:
7936 case Intrinsic::cos:
7937 case Intrinsic::cosh:
7938 case Intrinsic::sincos:
7939 case Intrinsic::sincospi:
7940 case Intrinsic::tan:
7941 case Intrinsic::tanh:
7942 case Intrinsic::asin:
7943 case Intrinsic::acos:
7944 case Intrinsic::atan:
7945 case Intrinsic::atan2:
7946 case Intrinsic::canonicalize:
7947 case Intrinsic::sqrt:
7948 case Intrinsic::exp:
7949 case Intrinsic::exp2:
7950 case Intrinsic::exp10:
7951 case Intrinsic::log:
7952 case Intrinsic::log2:
7953 case Intrinsic::log10:
7954 case Intrinsic::modf:
7955 case Intrinsic::floor:
7956 case Intrinsic::ceil:
7957 case Intrinsic::trunc:
7958 case Intrinsic::rint:
7959 case Intrinsic::nearbyint:
7960 case Intrinsic::round:
7961 case Intrinsic::roundeven:
7962 case Intrinsic::lrint:
7963 case Intrinsic::llrint:
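// Illustrative note (an assumption, not part of the original listing): the
// case labels above appear to enumerate intrinsics that simply propagate
// poison from their arguments to their result, e.g. ctpop, the saturating and
// fixed-point arithmetic intrinsics, and the plain FP math intrinsics.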
7972   switch (I->getOpcode()) {
7973 case Instruction::Freeze:
7974 case Instruction::PHI:
7975 case Instruction::Invoke:
7977 case Instruction::Select:
7979 case Instruction::Call:
7983 case Instruction::ICmp:
7984 case Instruction::FCmp:
7985 case Instruction::GetElementPtr:
7999 template <typename CallableT>
8001                                           const CallableT &Handle) {
8002   switch (I->getOpcode()) {
8003 case Instruction::Store:
8008 case Instruction::Load:
8015 case Instruction::AtomicCmpXchg:
8020 case Instruction::AtomicRMW:
8025 case Instruction::Call:
8026 case Instruction::Invoke: {
8030       for (unsigned i = 0; i < CB->arg_size(); ++i)
8033                 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
8038 case Instruction::Ret:
8039     if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
8040         Handle(I->getOperand(0)))
8043 case Instruction::Switch:
8047 case Instruction::Br: {
8049 if (BR->isConditional() && Handle(BR->getCondition()))
8061 template <typename CallableT>
8063                                        const CallableT &Handle) {
8066   switch (I->getOpcode()) {
8068 case Instruction::UDiv:
8069 case Instruction::SDiv:
8070 case Instruction::URem:
8071 case Instruction::SRem:
8072     return Handle(I->getOperand(1));
8081       I, [&](const Value *V) { return KnownPoison.count(V); });
8100 if (Arg->getParent()->isDeclaration())
8103     Begin = BB->begin();
8110 unsigned ScanLimit = 32;
8119 if (--ScanLimit == 0)
8123 return WellDefinedOp == V;
8143 if (--ScanLimit == 0)
8151     for (const Use &Op : I.operands()) {
8161       if (I.getOpcode() == Instruction::Select &&
8162           YieldsPoison.count(I.getOperand(1)) &&
8163           YieldsPoison.count(I.getOperand(2))) {
8169     if (!BB || !Visited.insert(BB).second)
8179   return ::programUndefinedIfUndefOrPoison(Inst, false);
8183   return ::programUndefinedIfUndefOrPoison(Inst, true);
8194     if (!C->getElementType()->isFloatingPointTy())
8196     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8197       if (C->getElementAsAPFloat(I).isNaN())
8211     return !C->isZero();
8214     if (!C->getElementType()->isFloatingPointTy())
8216     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8217       if (C->getElementAsAPFloat(I).isZero())
8240 if (CmpRHS == FalseVal) {
8284 if (CmpRHS != TrueVal) {
8323   Value *A = nullptr, *B = nullptr;
8328   Value *C = nullptr, *D = nullptr;
8330 if (L.Flavor != R.Flavor)
8382       return {L.Flavor, SPNB_NA, false};
8389       return {L.Flavor, SPNB_NA, false};
8396       return {L.Flavor, SPNB_NA, false};
8403       return {L.Flavor, SPNB_NA, false};
8419     return ConstantInt::get(V->getType(), ~(*C));
8476   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
8496   assert(X && Y && "Invalid operand");
8498   auto IsNegationOf = [&](const Value *X, const Value *Y) {
8503     if (NeedNSW && !BO->hasNoSignedWrap())
8507     if (!AllowPoison && !Zero->isNullValue())
8514   if (IsNegationOf(X, Y) || IsNegationOf(Y, X))
8541 const APInt *RHSC1, *RHSC2;
8552 return CR1.inverse() == CR2;
8586 std::optional<std::pair<CmpPredicate, Constant *>>
8589 "Only for relational integer predicates.");
8591 return std::nullopt;
8597 bool WillIncrement =
8602   auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
8603     return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
8606   Constant *SafeReplacementConstant = nullptr;
8609 if (!ConstantIsOk(CI))
8610 return std::nullopt;
8612 unsigned NumElts = FVTy->getNumElements();
8613     for (unsigned i = 0; i != NumElts; ++i) {
8614       Constant *Elt = C->getAggregateElement(i);
8616 return std::nullopt;
8624 if (!CI || !ConstantIsOk(CI))
8625 return std::nullopt;
8627 if (!SafeReplacementConstant)
8628 SafeReplacementConstant = CI;
8632     Value *SplatC = C->getSplatValue();
8635 if (!CI || !ConstantIsOk(CI))
8636 return std::nullopt;
8639 return std::nullopt;
8646   if (C->containsUndefOrPoisonElement()) {
8647     assert(SafeReplacementConstant && "Replacement constant not set");
8654   Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
8657 return std::make_pair(NewPred, NewC);
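// Illustrative example (an assumption, not part of the original listing): the
// helper above trades strictness of a relational predicate for an adjusted
// constant, e.g.
//   icmp sgt %x, 5   <=>   icmp sge %x, 6
// and returns std::nullopt when the constant would wrap past the
// signed/unsigned extreme, as guarded by ConstantIsOk above.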
8666   bool HasMismatchedZeros = false;
8672     Value *OutputZeroVal = nullptr;
8675       OutputZeroVal = TrueVal;
8678       OutputZeroVal = FalseVal;
8680     if (OutputZeroVal) {
8682         HasMismatchedZeros = true;
8683         CmpLHS = OutputZeroVal;
8686         HasMismatchedZeros = true;
8687         CmpRHS = OutputZeroVal;
8704 if (!HasMismatchedZeros)
8715   bool Ordered = false;
8726 if (LHSSafe && RHSSafe) {
8757 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8768 if (TrueVal == CmpLHS && FalseVal == CmpRHS)
8774 auto MaybeSExtCmpLHS =
8778   if (match(TrueVal, MaybeSExtCmpLHS)) {
8800   else if (match(FalseVal, MaybeSExtCmpLHS)) {
8840 case Instruction::ZExt:
8844 case Instruction::SExt:
8848 case Instruction::Trunc:
8851         CmpConst->getType() == SrcTy) {
8873       CastedTo = CmpConst;
8875       unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
8879 case Instruction::FPTrunc:
8882 case Instruction::FPExt:
8885 case Instruction::FPToUI:
8888 case Instruction::FPToSI:
8891 case Instruction::UIToFP:
8894 case Instruction::SIToFP:
8907   if (CastedBack && CastedBack != C)
8935 *CastOp = Cast1->getOpcode();
8936 Type *SrcTy = Cast1->getSrcTy();
8939 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
8940 return Cast2->getOperand(0);
8948   Value *CastedTo = nullptr;
8949 if (*CastOp == Instruction::Trunc) {
8963 "V2 and Cast1 should be the same type.");
8982   Value *TrueVal = SI->getTrueValue();
8983   Value *FalseVal = SI->getFalseValue();
8986       CmpI, TrueVal, FalseVal, LHS, RHS,
9005   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
9009 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9011 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9018 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9020 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9025 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
9044 return Intrinsic::umin;
9046 return Intrinsic::umax;
9048 return Intrinsic::smin;
9050 return Intrinsic::smax;
9066 case Intrinsic::smax:
return Intrinsic::smin;
9067 case Intrinsic::smin:
return Intrinsic::smax;
9068 case Intrinsic::umax:
return Intrinsic::umin;
9069 case Intrinsic::umin:
return Intrinsic::umax;
9072 case Intrinsic::maximum:
return Intrinsic::minimum;
9073 case Intrinsic::minimum:
return Intrinsic::maximum;
9074 case Intrinsic::maxnum:
return Intrinsic::minnum;
9075 case Intrinsic::minnum:
return Intrinsic::maxnum;
9090 std::pair<Intrinsic::ID, bool>
9095   bool AllCmpSingleUse = true;
9098   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
9104           SelectPattern.Flavor != CurrentPattern.Flavor)
9106         SelectPattern = CurrentPattern;
9111   switch (SelectPattern.Flavor) {
9113 return {Intrinsic::smin, AllCmpSingleUse};
9115 return {Intrinsic::umin, AllCmpSingleUse};
9117 return {Intrinsic::smax, AllCmpSingleUse};
9119 return {Intrinsic::umax, AllCmpSingleUse};
9121 return {Intrinsic::maxnum, AllCmpSingleUse};
9123 return {Intrinsic::minnum, AllCmpSingleUse};
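// Illustrative note (an assumption, not part of the original listing): the
// returns above map a recognised select-based min/max pattern over the whole
// list VL onto the matching llvm.smin/umin/smax/umax/minnum/maxnum intrinsic,
// and also report whether every feeding compare had a single use
// (AllCmpSingleUse), which callers can use as a profitability hint.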
9131 template <typename InstTy>
9141   for (unsigned I = 0; I != 2; ++I) {
9146     if (LHS != PN && RHS != PN)
9182   if (I->arg_size() != 2 || I->getType() != I->getArgOperand(0)->getType() ||
9183       I->getType() != I->getArgOperand(1)->getType())
9211   return !C->isNegative();
9223   const APInt *CLHS, *CRHS;
9226     return CLHS->sle(*CRHS);
9264   const APInt *CLHS, *CRHS;
9267     return CLHS->ule(*CRHS);
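// Illustrative example (an assumption, not part of the original listing): the
// two constant returns above resolve comparisons that are statically true,
// e.g. "icmp sle 3, 7" via CLHS->sle(*CRHS) and "icmp ule 2, 9" via
// CLHS->ule(*CRHS), the simplest cases of the "is this icmp always true"
// helper.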
9276 static std::optional<bool>
9281 return std::nullopt;
9288 return std::nullopt;
9295 return std::nullopt;
9302 return std::nullopt;
9309 return std::nullopt;
9316 static std::optional<bool>
9322   if (CR.icmp(Pred, RCR))
9329 return std::nullopt;
9342 return std::nullopt;
9348 static std::optional<bool>
9379 const APInt *Unused;
9398 return std::nullopt;
9402 if (L0 == R0 && L1 == R1)
9438 return std::nullopt;
9445 static std::optional<bool>
9450   assert((LHS->getOpcode() == Instruction::And ||
9451           LHS->getOpcode() == Instruction::Or ||
9452           LHS->getOpcode() == Instruction::Select) &&
9453          "Expected LHS to be 'and', 'or', or 'select'.");
9460 const Value *ALHS, *ARHS;
9465             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9468             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9470 return std::nullopt;
9472 return std::nullopt;
9481 return std::nullopt;
9486 return std::nullopt;
9488 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
9489 "Expected integer type only!");
9493 LHSIsTrue = !LHSIsTrue;
9498 LHSCmp->getOperand(1), RHSPred, RHSOp0, RHSOp1,
9503 ConstantInt::get(V->getType(), 0), RHSPred,
9504                                 RHSOp0, RHSOp1, DL, LHSIsTrue);
9510 if ((LHSI->getOpcode() == Instruction::And ||
9511 LHSI->getOpcode() == Instruction::Or ||
9512 LHSI->getOpcode() == Instruction::Select))
9516 return std::nullopt;
9521                                       bool LHSIsTrue, unsigned Depth) {
9527   bool InvertRHS = false;
9536 LHS, RHSCmp->getCmpPredicate(), RHSCmp->getOperand(0),
9537             RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9538 return InvertRHS ? !*Implied : *Implied;
9539 return std::nullopt;
9545             ConstantInt::get(V->getType(), 0), DL,
9547 return InvertRHS ? !*Implied : *Implied;
9548 return std::nullopt;
9552 return std::nullopt;
9556 const Value *RHS1, *RHS2;
9558 if (std::optional<bool> Imp =
9562 if (std::optional<bool> Imp =
9568 if (std::optional<bool> Imp =
9572 if (std::optional<bool> Imp =
9578 return std::nullopt;
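// Illustrative example (an assumption, not part of the original listing): the
// implied-condition queries above return std::optional<bool>: true when the
// LHS condition forces the RHS condition, false when it forces its negation,
// and std::nullopt when nothing can be concluded. For instance, knowing
// (icmp slt %x, 5) is true implies (icmp slt %x, 10) is true.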
9583 static std::pair<Value *, bool>
9585   if (!ContextI || !ContextI->getParent())
9586     return {nullptr, false};
9593     return {nullptr, false};
9599     return {nullptr, false};
9602   if (TrueBB == FalseBB)
9603     return {nullptr, false};
9605 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9606 "Predecessor block does not point to successor?");
9609 return {PredCond, TrueBB == ContextBB};
9615   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
9619 return std::nullopt;
9631 return std::nullopt;
9636 bool PreferSignedRange) {
9637   unsigned Width = Lower.getBitWidth();
9640   case Instruction::Sub:
9650     if (PreferSignedRange && HasNSW && HasNUW)
9656     } else if (HasNSW) {
9657       if (C->isNegative()) {
9670 case Instruction::Add:
9679 if (PreferSignedRange && HasNSW && HasNUW)
9685     } else if (HasNSW) {
9686       if (C->isNegative()) {
9699 case Instruction::And:
9710 case Instruction::Or:
9716 case Instruction::AShr:
9722 unsigned ShiftAmount = Width - 1;
9723     if (!C->isZero() && IIQ.isExact(&BO))
9724       ShiftAmount = C->countr_zero();
9725     if (C->isNegative()) {
9728       Upper = C->ashr(ShiftAmount) + 1;
9731       Lower = C->ashr(ShiftAmount);
9737 case Instruction::LShr:
9743 unsigned ShiftAmount = Width - 1;
9744     if (!C->isZero() && IIQ.isExact(&BO))
9745       ShiftAmount = C->countr_zero();
9746     Lower = C->lshr(ShiftAmount);
9751 case Instruction::Shl:
9758     if (C->isNegative()) {
9760       unsigned ShiftAmount = C->countl_one() - 1;
9761       Lower = C->shl(ShiftAmount);
9765       unsigned ShiftAmount = C->countl_zero() - 1;
9767       Upper = C->shl(ShiftAmount) + 1;
9786 case Instruction::SDiv:
9790     if (C->isAllOnes()) {
9795     } else if (C->countl_zero() < Width - 1) {
9806     if (C->isMinSignedValue()) {
9818 case Instruction::UDiv:
9828 case Instruction::SRem:
9834     if (C->isNegative()) {
9845 case Instruction::URem:
9860 bool UseInstrInfo) {
9861   unsigned Width = II.getType()->getScalarSizeInBits();
9863   switch (II.getIntrinsicID()) {
9864 case Intrinsic::ctlz:
9865 case Intrinsic::cttz: {
9867     if (!UseInstrInfo || !match(II.getArgOperand(1), m_One()))
9872 case Intrinsic::ctpop:
9875 APInt(Width, Width) + 1);
9876 case Intrinsic::uadd_sat:
9882 case Intrinsic::sadd_sat:
9885     if (C->isNegative())
9896 case Intrinsic::usub_sat:
9906 case Intrinsic::ssub_sat:
9908     if (C->isNegative())
9918     if (C->isNegative())
9929 case Intrinsic::umin:
9930 case Intrinsic::umax:
9931 case Intrinsic::smin:
9932 case Intrinsic::smax:
9937     switch (II.getIntrinsicID()) {
9938 case Intrinsic::umin:
9940 case Intrinsic::umax:
9942 case Intrinsic::smin:
9945 case Intrinsic::smax:
9952 case Intrinsic::abs:
9961 case Intrinsic::vscale:
9962     if (!II.getParent() || !II.getFunction())
9969 return ConstantRange::getFull(Width);
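// Illustrative example (an assumption, not part of the original listing): the
// ranges returned above are conservative per-intrinsic bounds, e.g. ctpop on
// an i8 always produces a value in [0, 8], while any unhandled intrinsic falls
// back to ConstantRange::getFull(Width).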
9974   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
9978     return ConstantRange::getFull(BitWidth);
10001     return ConstantRange::getFull(BitWidth);
10003   switch (R.Flavor) {
10015     return ConstantRange::getFull(BitWidth);
10022   unsigned BitWidth = I->getType()->getScalarSizeInBits();
10023   if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
10041 assert(V->getType()->isIntOrIntVectorTy() &&
"Expected integer instruction");
10044 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
10047 return C->toConstantRange();
10049 unsigned BitWidth = V->getType()->getScalarSizeInBits();
10062         SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10064         SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10074     if (std::optional<ConstantRange> Range = A->getRange())
10082     if (std::optional<ConstantRange> Range = CB->getRange())
10093 "Got assumption for the wrong function!");
10094       assert(I->getIntrinsicID() == Intrinsic::assume &&
10095              "must be an assume intrinsic");
10099       Value *Arg = I->getArgOperand(0);
10102 if (!Cmp || Cmp->getOperand(0) != V)
10107                                   UseInstrInfo, AC, I, DT, Depth + 1);
10129     InsertAffected(Op);
10136   auto AddAffected = [&InsertAffected](Value *V) {
10140   auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
10151   while (!Worklist.empty()) {
10153     if (!Visited.insert(V).second)
10194       AddCmpOperands(A, B);
10231       AddCmpOperands(A, B);
10259 if (BO->getOpcode() == Instruction::Add ||
10260 BO->getOpcode() == Instruction::Or) {
10262 const APInt *C1, *C2;
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt reverseBits() const
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static bool isFPPredicate(Predicate P)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
static bool isIntPredicate(Predicate P)
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
bool hasSameSign() const
Query samesign information, for optimizations.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI bool isAllNegative() const
Return true if all values in this range are negative.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
This is an important base class in LLVM.
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isZeroValue() const
Return true if the value is negative zero or null value.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
void setNoNaNs(bool B=true)
const BasicBlock & getEntryBlock() const
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
PointerType * getType() const
Global values are always pointers.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getSwappedCmpPredicate() const
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI uint64_t getArrayNumElements() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static LLVM_ABI IntegerType * getInt16Ty(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
PointerType getValue() const
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
@ C
The default llvm calling convention, compatible with C.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr or ashr).
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > > m_OrdOrUnordFMin(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point minimum function.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > > m_OrdOrUnordFMax(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point maximum function.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
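As a quick illustration of how these matchers compose (a minimal sketch; the helper name and surrounding setup are illustrative, not part of this file):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Recognize "add nsw X, C", binding the variable operand and the constant.
// m_APInt accepts a ConstantInt or a splatted constant vector.
static bool matchNSWAddOfConstant(Value *V, Value *&X, const APInt *&C) {
  return match(V, m_NSWAdd(m_Value(X), m_APInt(C)));
}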
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
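A hedged usage sketch (assuming the usual implicit conversion from a plain Value pointer to WithCache<const Value *>; the helper name is made up for illustration):

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// "add A, B" can be treated as a disjoint "or" when A and B share no set bits.
static bool addIsDisjointOr(const Value *A, const Value *B,
                            const SimplifyQuery &SQ) {
  return haveNoCommonBitsSet(A, B, SQ);
}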
LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provably be executed on the path to OnPathTo if Root produced...
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
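A minimal sketch of querying FP classes for a scalar value (APInt(1, 1) is the usual DemandedElts convention for scalars; the helper name is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Prove the value can never be a NaN or a negative zero.
static bool neverNaNOrNegZero(const Value *V, const SimplifyQuery &SQ) {
  KnownFPClass Known =
      computeKnownFPClass(V, APInt(1, 1), fcNan | fcNegZero, SQ);
  return Known.isKnownNever(fcNan | fcNegZero);
}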
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns the call's pointer argument that is considered the same by aliasing rules.
LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to that requested.
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
LLVM_ABI Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
LLVM_ABI std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
bool isa_and_nonnull(const Y &Val)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool canIgnoreSignBitOfZero(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is zero.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
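For example (a sketch assuming a 32-bit integer value; SimplifyQuery is built from just the DataLayout here, and the helper name is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// Check that the top byte (bits 31..24) of a 32-bit value is provably zero.
static bool topByteIsZero(const Value *V, const DataLayout &DL) {
  APInt Mask = APInt::getHighBitsSet(32, 8);
  return MaskedValueIsZero(V, Mask, SimplifyQuery(DL));
}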
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most, stopping at the first 1.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
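A small sketch (helper name illustrative) of detecting an add recurrence through this interface:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// True for phis of the form: iv = phi [Start, ...], [iv + Step, ...].
static bool isAddRecurrencePhi(const PHINode *P) {
  BinaryOperator *BO = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  return matchSimpleRecurrence(P, BO, Start, Step) &&
         BO->getOpcode() == Instruction::Add;
}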
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
LLVM_ABI SelectPatternResult getSelectPattern(CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior=SPNB_NA, bool Ordered=false)
Determine the pattern for predicate X Pred Y ? X : Y.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
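A minimal sketch of the out-parameter form, assuming V has an integer (or vector-of-integer) type; names are illustrative:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Returns true when the low bit of V is provably zero (V is even).
static bool isKnownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL); // AC/CxtI/DT default to nullptr
  return Known.countMinTrailingZeros() >= 1;
}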
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
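For instance (a sketch; the predicate name is made up):

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Recognize a select that implements smax(A, B) and bind its operands.
static bool isSMaxIdiom(Value *V, Value *&A, Value *&B) {
  SelectPatternResult SPR = matchSelectPattern(V, A, B);
  return SPR.Flavor == SPF_SMAX;
}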
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, const Instruction *CtxI, const DominatorTree *DT=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF)
Convert given SPF to equivalent min/max intrinsic.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or it may be known that no operand is NaN).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
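A sketch of the usual way this is consumed, converting sign-bit copies into a significant-bit count (helper name illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// True if V, viewed as a signed integer, fits in 16 bits.
static bool fitsInSigned16(const Value *V, const DataLayout &DL) {
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  unsigned SignBits = ComputeNumSignBits(V, DL);
  return BitWidth - SignBits + 1 <= 16;
}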
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point value can never contain a NaN or infinity.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_ABI Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a...
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is either NaN or never less than -0....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instruction I depend on values not reachable through...
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
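A hedged usage sketch (the helper name is illustrative); note the tri-state result:

#include <optional>

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// Does CondA being true force CondB to be true? nullopt means "unknown",
// which we conservatively treat as "no".
static bool firstImpliesSecond(const Value *CondA, const Value *CondB,
                               const DataLayout &DL) {
  std::optional<bool> Implied =
      isImpliedCondition(CondA, CondB, DL, /*LHSIsTrue=*/true);
  return Implied.value_or(false);
}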
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
LLVM_ABI bool canIgnoreSignBitOfNaN(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is NaN.
LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
This struct is a compact representation of a valid (non-zero power of two) alignment.
SmallPtrSet< Value *, 4 > AffectedValues
Represents offset+length into a ConstantDataArray.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static LLVM_ABI KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
LLVM_ABI KnownBits blsi() const
Compute known bits for X & -X, which has only the lowest set bit of X set.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
LLVM_ABI KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
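A tiny sketch combining a few of these KnownBits helpers (purely illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Known bits of (X + 4): the constant's known low bits combine with X's.
static KnownBits addFour(const KnownBits &X) {
  KnownBits Four = KnownBits::makeConstant(APInt(X.getBitWidth(), 4));
  return KnownBits::add(X, Four); // NSW/NUW not assumed
}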
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS, or subtracting RHS from LHS, selected by Add.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void setAllOnes()
Make all bits known to be one and discard any previous information.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
LLVM_ABI void propagateCanonicalizingSrc(const KnownFPClass &Src, DenormalMode Mode)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's know this can never be interpreted as a negative zero.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
Represent one piece of information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min or a max pattern.
SimplifyQuery getWithoutCondContext() const
SimplifyQuery getWithInstruction(const Instruction *I) const
const DomConditionCache * DC