109#define DEBUG_TYPE "instcombine"
117 "Number of instruction combining iterations performed");
118STATISTIC(NumOneIteration,
"Number of functions with one iteration");
119STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
120STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
122 "Number of functions with four or more iterations");
126STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
132 "Controls which instructions are visited");
139 "instcombine-max-sink-users",
cl::init(32),
140 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
144 cl::desc(
"Maximum array size considered when doing a combine"));
156std::optional<Instruction *>
159 if (
II.getCalledFunction()->isTargetIntrinsic()) {
167 bool &KnownBitsComputed) {
169 if (
II.getCalledFunction()->isTargetIntrinsic()) {
171 *
this,
II, DemandedMask, Known, KnownBitsComputed);
182 if (
II.getCalledFunction()->isTargetIntrinsic()) {
184 *
this,
II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
202 auto *Inst = dyn_cast<Instruction>(
GEP);
209 if (Inst && !
GEP->hasOneUse() && !
GEP->hasAllConstantIndices() &&
210 !
GEP->getSourceElementType()->isIntegerTy(8)) {
224bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
243bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
244 unsigned ToWidth)
const {
250 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
255 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
260 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
271bool InstCombinerImpl::shouldChangeType(
Type *
From,
Type *To)
const {
277 unsigned FromWidth =
From->getPrimitiveSizeInBits();
279 return shouldChangeType(FromWidth, ToWidth);
288 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
289 if (!OBO || !OBO->hasNoSignedWrap())
292 const APInt *BVal, *CVal;
297 bool Overflow =
false;
298 switch (
I.getOpcode()) {
299 case Instruction::Add:
300 (void)BVal->
sadd_ov(*CVal, Overflow);
302 case Instruction::Sub:
303 (void)BVal->
ssub_ov(*CVal, Overflow);
305 case Instruction::Mul:
306 (void)BVal->
smul_ov(*CVal, Overflow);
316 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
317 return OBO && OBO->hasNoUnsignedWrap();
321 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
322 return OBO && OBO->hasNoSignedWrap();
331 I.clearSubclassOptionalData();
336 I.clearSubclassOptionalData();
337 I.setFastMathFlags(FMF);
346 auto *Cast = dyn_cast<CastInst>(BinOp1->
getOperand(0));
347 if (!Cast || !Cast->hasOneUse())
351 auto CastOpcode = Cast->getOpcode();
352 if (CastOpcode != Instruction::ZExt)
360 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
361 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
387 Cast->dropPoisonGeneratingFlags();
393Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
394 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
397 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
398 Type *CastTy = IntToPtr->getDestTy();
401 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
404 return PtrToInt->getOperand(0);
431 bool Changed =
false;
439 Changed = !
I.swapOperands();
441 if (
I.isCommutative()) {
442 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
452 if (
I.isAssociative()) {
475 I.setHasNoUnsignedWrap(
true);
478 I.setHasNoSignedWrap(
true);
507 if (
I.isAssociative() &&
I.isCommutative()) {
570 if (isa<FPMathOperator>(NewBO)) {
584 I.setHasNoUnsignedWrap(
true);
602 if (LOp == Instruction::And)
603 return ROp == Instruction::Or || ROp == Instruction::Xor;
606 if (LOp == Instruction::Or)
607 return ROp == Instruction::And;
611 if (LOp == Instruction::Mul)
612 return ROp == Instruction::Add || ROp == Instruction::Sub;
635 if (isa<Constant>(V))
649 assert(
Op &&
"Expected a binary operator");
650 LHS =
Op->getOperand(0);
651 RHS =
Op->getOperand(1);
652 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
657 Instruction::Shl, ConstantInt::get(
Op->getType(), 1),
C);
658 assert(
RHS &&
"Constant folding of immediate constants failed");
659 return Instruction::Mul;
664 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
667 return Instruction::AShr;
670 return Op->getOpcode();
679 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
682 Value *RetVal =
nullptr;
693 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
713 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
736 if (isa<BinaryOperator>(RetVal)) {
739 if (isa<OverflowingBinaryOperator>(&
I)) {
740 HasNSW =
I.hasNoSignedWrap();
741 HasNUW =
I.hasNoUnsignedWrap();
743 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
744 HasNSW &= LOBO->hasNoSignedWrap();
745 HasNUW &= LOBO->hasNoUnsignedWrap();
748 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
749 HasNSW &= ROBO->hasNoSignedWrap();
750 HasNUW &= ROBO->hasNoUnsignedWrap();
753 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
763 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
766 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
781 unsigned Opc =
I->getOpcode();
782 unsigned ConstIdx = 1;
789 case Instruction::Sub:
792 case Instruction::ICmp:
799 case Instruction::Or:
803 case Instruction::Add:
809 if (!
match(
I->getOperand(1 - ConstIdx),
822 if (Opc == Instruction::ICmp && !cast<ICmpInst>(
I)->isEquality()) {
825 if (!Cmp || !Cmp->isZeroValue())
830 bool Consumes =
false;
834 assert(NotOp !=
nullptr &&
835 "Desync between isFreeToInvert and getFreelyInverted");
844 case Instruction::Sub:
847 case Instruction::Or:
848 case Instruction::Add:
851 case Instruction::ICmp:
887 auto IsValidBinOpc = [](
unsigned Opc) {
891 case Instruction::And:
892 case Instruction::Or:
893 case Instruction::Xor:
894 case Instruction::Add:
903 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
905 assert(ShOpc != Instruction::AShr);
906 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
907 ShOpc == Instruction::Shl;
910 auto GetInvShift = [](
unsigned ShOpc) {
911 assert(ShOpc != Instruction::AShr);
912 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
915 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
919 if (BinOpc1 == Instruction::And)
924 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
930 if (BinOpc2 == Instruction::And)
941 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
943 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
944 if (!
match(
I.getOperand(ShOpnum),
947 if (!
match(
I.getOperand(1 - ShOpnum),
954 auto *IY = dyn_cast<Instruction>(
I.getOperand(ShOpnum));
955 auto *IX = dyn_cast<Instruction>(ShiftedX);
960 unsigned ShOpc = IY->getOpcode();
961 if (ShOpc != IX->getOpcode())
965 auto *BO2 = dyn_cast<Instruction>(
I.getOperand(1 - ShOpnum));
969 unsigned BinOpc = BO2->getOpcode();
971 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
974 if (ShOpc == Instruction::AShr) {
988 if (BinOpc ==
I.getOpcode() &&
989 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
1004 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1018 return MatchBinOp(1);
1036 Value *
A, *CondVal, *TrueVal, *FalseVal;
1039 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1041 A->getType()->getScalarSizeInBits() == 1 &&
1048 if (MatchSelectAndCast(
LHS,
RHS))
1050 else if (MatchSelectAndCast(
RHS,
LHS))
1055 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1056 bool IsCastOpRHS = (CastOp ==
RHS);
1057 bool IsZExt = isa<ZExtInst>(CastOp);
1062 }
else if (IsZExt) {
1063 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1076 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1078 NewFoldedConst(
true, FalseVal));
1082 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1084 NewFoldedConst(
false, FalseVal));
1105 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1225static std::optional<std::pair<Value *, Value *>>
1227 if (
LHS->getParent() !=
RHS->getParent())
1228 return std::nullopt;
1230 if (
LHS->getNumIncomingValues() < 2)
1231 return std::nullopt;
1234 return std::nullopt;
1236 Value *L0 =
LHS->getIncomingValue(0);
1237 Value *R0 =
RHS->getIncomingValue(0);
1239 for (
unsigned I = 1, E =
LHS->getNumIncomingValues();
I != E; ++
I) {
1243 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1246 return std::nullopt;
1249 return std::optional(std::pair(L0, R0));
1252std::optional<std::pair<Value *, Value *>>
1253InstCombinerImpl::matchSymmetricPair(
Value *LHS,
Value *RHS) {
1254 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1255 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1257 return std::nullopt;
1259 case Instruction::PHI:
1261 case Instruction::Select: {
1267 return std::pair(TrueVal, FalseVal);
1268 return std::nullopt;
1270 case Instruction::Call: {
1274 if (LHSMinMax && RHSMinMax &&
1281 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1282 return std::nullopt;
1285 return std::nullopt;
1295 if (!LHSIsSelect && !RHSIsSelect)
1300 if (isa<FPMathOperator>(&
I)) {
1301 FMF =
I.getFastMathFlags();
1308 Value *
Cond, *True =
nullptr, *False =
nullptr;
1316 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1331 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1340 else if (True && !False)
1348 if (
Value *NewSel = foldAddNegate(
B,
C,
RHS))
1355 if (
Value *NewSel = foldAddNegate(E,
F,
LHS))
1359 if (!True || !False)
1370 assert(!isa<Constant>(
I) &&
"Shouldn't invert users of constant");
1372 if (U == IgnoredUser)
1374 switch (cast<Instruction>(U)->
getOpcode()) {
1375 case Instruction::Select: {
1376 auto *SI = cast<SelectInst>(U);
1378 SI->swapProfMetadata();
1381 case Instruction::Br: {
1388 case Instruction::Xor:
1395 "canFreelyInvertAllUsersOf() ?");
1402Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1412 if (
C->getType()->getElementType()->isIntegerTy())
1416 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1421 if (isa<UndefValue>(Elt))
1424 if (!isa<ConstantInt>(Elt))
1431 if (
auto *CV = dyn_cast<Constant>(V))
1432 if (CV->getType()->isVectorTy() &&
1433 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1446Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1447 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1451 Type *IntTy = IntOps[0]->getType();
1456 unsigned MaxRepresentableBits =
1461 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1465 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1466 if (OpsKnown[OpNo].hasKnownBits() &&
1467 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1472 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1476 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1480 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1482 if (OpsFromSigned != isa<SIToFPInst>(BO.
getOperand(OpNo)) &&
1491 if (MaxRepresentableBits < IntSz) {
1501 NumUsedLeadingBits[OpNo] =
1502 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1510 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1513 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1518 if (Op1FpC !=
nullptr) {
1520 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1525 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1527 if (Op1IntC ==
nullptr)
1530 : Instruction::UIToFP,
1531 Op1IntC, FPTy,
DL) != Op1FpC)
1535 IntOps[1] = Op1IntC;
1539 if (IntTy != IntOps[1]->
getType())
1542 if (Op1FpC ==
nullptr) {
1543 if (!IsValidPromotion(1))
1546 if (!IsValidPromotion(0))
1552 bool NeedsOverflowCheck =
true;
1555 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1556 unsigned OverflowMaxCurBits =
1557 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1558 bool OutputSigned = OpsFromSigned;
1560 case Instruction::FAdd:
1561 IntOpc = Instruction::Add;
1562 OverflowMaxOutputBits += OverflowMaxCurBits;
1564 case Instruction::FSub:
1565 IntOpc = Instruction::Sub;
1566 OverflowMaxOutputBits += OverflowMaxCurBits;
1568 case Instruction::FMul:
1569 IntOpc = Instruction::Mul;
1570 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1576 if (OverflowMaxOutputBits < IntSz) {
1577 NeedsOverflowCheck =
false;
1580 if (IntOpc == Instruction::Sub)
1581 OutputSigned =
true;
1587 if (NeedsOverflowCheck &&
1588 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1592 if (
auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1593 IntBO->setHasNoSignedWrap(OutputSigned);
1594 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1607 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1627 if (
Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1628 IntOps, Op1FpC, OpsKnown))
1630 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1646 !
X->getType()->isIntOrIntVectorTy(1))
1663 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1664 }
else if (
match(SI->getCondition(),
1689 bool FoldWithMultiUse) {
1691 if (!SI->hasOneUse() && !FoldWithMultiUse)
1694 Value *TV = SI->getTrueValue();
1695 Value *FV = SI->getFalseValue();
1698 if (SI->getType()->isIntOrIntVectorTy(1))
1708 if (
auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1709 if (CI->hasOneUse()) {
1710 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1711 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1720 if (!NewTV && !NewFV)
1757 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&
I);
1772 bool AllowMultipleUses) {
1774 if (NumPHIValues == 0)
1781 bool IdenticalUsers =
false;
1782 if (!AllowMultipleUses && !OneUse) {
1786 if (UI != &
I && !
I.isIdenticalTo(UI))
1790 IdenticalUsers =
true;
1799 auto *
I = dyn_cast<Instruction>(
Op);
1804 if (isa<PHINode>(
I))
1820 bool SeenNonSimplifiedInVal =
false;
1821 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1832 auto WillFold = [&]() {
1837 const APInt *Ignored;
1838 if (isa<CmpIntrinsic>(InVal) &&
1843 if (isa<ZExtInst>(InVal) &&
1844 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1858 if (!OneUse && !IdenticalUsers)
1861 if (SeenNonSimplifiedInVal)
1863 SeenNonSimplifiedInVal =
true;
1879 if (isa<InvokeInst>(InVal))
1880 if (cast<Instruction>(InVal)->
getParent() == InBB)
1893 for (
auto OpIndex : OpsToMoveUseToIncomingBB) {
1904 U = U->DoPHITranslation(PN->
getParent(), OpBB);
1907 Clones.
insert({OpBB, Clone});
1910 NewPhiValues[
OpIndex] = Clone;
1919 for (
unsigned i = 0; i != NumPHIValues; ++i)
1922 if (IdenticalUsers) {
1935 const_cast<PHINode &
>(*NewPN),
1945 auto *Phi0 = dyn_cast<PHINode>(BO.
getOperand(0));
1946 auto *Phi1 = dyn_cast<PHINode>(BO.
getOperand(1));
1947 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1948 Phi0->getNumOperands() != Phi1->getNumOperands())
1952 if (BO.
getParent() != Phi0->getParent() ||
1969 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
1970 auto &Phi0Use = std::get<0>(
T);
1971 auto &Phi1Use = std::get<1>(
T);
1972 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1974 Value *Phi0UseV = Phi0Use.get();
1975 Value *Phi1UseV = Phi1Use.get();
1978 else if (Phi1UseV ==
C)
1985 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
1986 CanFoldIncomingValuePair)) {
1989 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
1990 "The number of collected incoming values should equal the number "
1991 "of the original PHINode operands!");
1992 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
1993 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
1998 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2005 ConstBB = Phi0->getIncomingBlock(0);
2006 OtherBB = Phi0->getIncomingBlock(1);
2008 ConstBB = Phi0->getIncomingBlock(1);
2009 OtherBB = Phi0->getIncomingBlock(0);
2019 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->
getTerminator());
2020 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2027 for (
auto BBIter = BO.
getParent()->begin(); &*BBIter != &BO; ++BBIter)
2040 Phi0->getIncomingValueForBlock(OtherBB),
2041 Phi1->getIncomingValueForBlock(OtherBB));
2042 if (
auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2043 NotFoldedNewBO->copyIRFlags(&BO);
2053 if (!isa<Constant>(
I.getOperand(1)))
2056 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
2059 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
2070 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2077 if (!isa<VectorType>(Inst.
getType()))
2083 cast<VectorType>(Inst.
getType())->getElementCount());
2085 cast<VectorType>(Inst.
getType())->getElementCount());
2090 Value *L0, *L1, *R0, *R1;
2095 cast<ShuffleVectorInst>(
LHS)->isConcat() &&
2096 cast<ShuffleVectorInst>(
RHS)->isConcat()) {
2103 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2106 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2113 if (
auto *BO = dyn_cast<BinaryOperator>(V))
2117 M, Intrinsic::vector_reverse, V->getType());
2130 return createBinOpReverse(V1, V2);
2134 return createBinOpReverse(V1,
RHS);
2138 return createBinOpReverse(
LHS, V2);
2148 if (
auto *BO = dyn_cast<BinaryOperator>(XY))
2157 V1->
getType() == V2->getType() &&
2160 return createBinOpShuffle(V1, V2, Mask);
2169 auto *LShuf = cast<ShuffleVectorInst>(
LHS);
2170 auto *RShuf = cast<ShuffleVectorInst>(
RHS);
2175 if (LShuf->isSelect() &&
2177 RShuf->isSelect() &&
2195 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.
getType());
2200 cast<FixedVectorType>(V1->
getType())->getNumElements() <=
2201 InstVTy->getNumElements()) {
2203 "Shuffle should not change scalar type");
2210 bool ConstOp1 = isa<Constant>(
RHS);
2212 unsigned SrcVecNumElts =
2213 cast<FixedVectorType>(V1->
getType())->getNumElements();
2216 bool MayChange =
true;
2217 unsigned NumElts = InstVTy->getNumElements();
2218 for (
unsigned I = 0;
I < NumElts; ++
I) {
2220 if (ShMask[
I] >= 0) {
2221 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2229 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2230 I >= SrcVecNumElts) {
2234 NewVecC[ShMask[
I]] = CElt;
2245 if (
I >= SrcVecNumElts || ShMask[
I] < 0) {
2250 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2267 Value *NewLHS = ConstOp1 ? V1 : NewC;
2268 Value *NewRHS = ConstOp1 ? NewC : V1;
2269 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2276 if (isa<ShuffleVectorInst>(
RHS))
2309 if (isa<FPMathOperator>(R)) {
2310 R->copyFastMathFlags(&Inst);
2313 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2314 NewInstBO->copyIRFlags(R);
2343 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2344 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2362 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
2368 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2370 NewBinOp->setHasNoSignedWrap();
2372 NewBinOp->setHasNoUnsignedWrap();
2388 if (!
GEP.hasAllConstantIndices())
2404 Type *Ty =
GEP.getSourceElementType();
2406 Value *NewFalseC = Builder.
CreateGEP(Ty, FalseC, IndexC,
"", NW);
2416 if (
GEP.getNumIndices() != 1)
2425 Type *PtrTy = Src->getType()->getScalarType();
2426 unsigned IndexSizeInBits =
DL.getIndexTypeSizeInBits(PtrTy);
2433 if (isa<ScalableVectorType>(
BaseType))
2437 if (NewOffset.
isZero() ||
2438 (Src->hasOneUse() &&
GEP.getOperand(1)->hasOneUse())) {
2459 Type *PtrTy = Src->getType()->getScalarType();
2460 if (
GEP.hasAllConstantIndices() &&
2461 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2465 bool IsFirstType =
true;
2466 unsigned NumVarIndices = 0;
2467 for (
auto Pair :
enumerate(Src->indices())) {
2468 if (!isa<ConstantInt>(Pair.value())) {
2470 IsFirstType =
false;
2471 NumVarIndices = Pair.index() + 1;
2478 if (NumVarIndices != Src->getNumIndices()) {
2498 if (!
Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2504 Src->getNumIndices() - NumVarIndices));
2511 if (
Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2513 if (!
Idx.isNonNegative())
2522 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2528 bool EndsWithSequential =
false;
2531 EndsWithSequential =
I.isSequential();
2534 if (EndsWithSequential) {
2537 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2554 Indices.
append(Src->op_begin()+1, Src->op_end()-1);
2557 }
else if (isa<Constant>(*
GEP.idx_begin()) &&
2558 cast<Constant>(*
GEP.idx_begin())->isNullValue() &&
2559 Src->getNumOperands() != 1) {
2561 Indices.
append(Src->op_begin()+1, Src->op_end());
2565 if (!Indices.
empty())
2568 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2576 bool &DoesConsume,
unsigned Depth) {
2577 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2595 if (!WillInvertAllUses)
2600 if (
auto *
I = dyn_cast<CmpInst>(V)) {
2611 DoesConsume,
Depth))
2614 DoesConsume,
Depth))
2623 DoesConsume,
Depth))
2626 DoesConsume,
Depth))
2635 DoesConsume,
Depth))
2644 DoesConsume,
Depth))
2656 bool LocalDoesConsume = DoesConsume;
2658 LocalDoesConsume,
Depth))
2661 LocalDoesConsume,
Depth)) {
2662 DoesConsume = LocalDoesConsume;
2665 DoesConsume,
Depth);
2666 assert(NotB !=
nullptr &&
2667 "Unable to build inverted value for known freely invertable op");
2668 if (
auto *
II = dyn_cast<IntrinsicInst>(V))
2677 if (
PHINode *PN = dyn_cast<PHINode>(V)) {
2678 bool LocalDoesConsume = DoesConsume;
2680 for (
Use &U : PN->operands()) {
2681 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2685 if (NewIncomingVal ==
nullptr)
2688 if (NewIncomingVal == V)
2691 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
2694 DoesConsume = LocalDoesConsume;
2700 for (
auto [Val, Pred] : IncomingValues)
2709 DoesConsume,
Depth))
2716 DoesConsume,
Depth))
2725 bool IsLogical,
Value *
A,
2727 bool LocalDoesConsume = DoesConsume;
2729 LocalDoesConsume,
Depth))
2732 LocalDoesConsume,
Depth)) {
2734 LocalDoesConsume,
Depth);
2735 DoesConsume = LocalDoesConsume;
2745 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
2749 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
2753 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
2757 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
2766 Type *GEPEltType =
GEP.getSourceElementType();
2777 if (
GEP.getNumIndices() == 1 &&
2785 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
2786 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
2789 return match(V, m_APInt(C)) && !C->isZero();
2795 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->
getOperand(0));
2812 auto *Op2 = dyn_cast<GetElementPtrInst>(*
I);
2813 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2814 Op1->getSourceElementType() != Op2->getSourceElementType())
2822 Type *CurTy =
nullptr;
2824 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
2825 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2828 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2837 assert(CurTy &&
"No current type?");
2857 CurTy = Op1->getSourceElementType();
2865 NW &= Op2->getNoWrapFlags();
2874 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2875 NewGEP->setNoWrapFlags(NW);
2888 NewPN = Builder.
CreatePHI(Op1->getOperand(DI)->getType(),
2893 NewPN->
addIncoming(cast<GEPOperator>(
I)->getOperand(DI),
2896 NewGEP->setOperand(DI, NewPN);
2899 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
2906 Type *GEPType =
GEP.getType();
2907 Type *GEPEltType =
GEP.getSourceElementType();
2916 if (
auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2917 auto VWidth = GEPFVTy->getNumElements();
2918 APInt PoisonElts(VWidth, 0);
2934 bool MadeChange =
false;
2938 Type *NewScalarIndexTy =
2948 Type *IndexTy = (*I)->getType();
2949 Type *NewIndexType =
2952 cast<VectorType>(IndexTy)->getElementCount())
2964 if (IndexTy != NewIndexType) {
2976 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
2981 GEP.getNoWrapFlags()));
2992 if (
auto *PN = dyn_cast<PHINode>(PtrOp)) {
2997 if (
auto *Src = dyn_cast<GEPOperator>(PtrOp))
3001 if (
GEP.getNumIndices() == 1) {
3002 unsigned AS =
GEP.getPointerAddressSpace();
3003 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3007 if (TyAllocSize == 1) {
3016 GEPType ==
Y->getType()) {
3017 bool HasSameUnderlyingObject =
3019 bool Changed =
false;
3020 GEP.replaceUsesWithIf(
Y, [&](
Use &U) {
3021 bool ShouldReplace = HasSameUnderlyingObject ||
3022 isa<ICmpInst>(U.getUser()) ||
3023 isa<PtrToIntInst>(U.getUser());
3024 Changed |= ShouldReplace;
3025 return ShouldReplace;
3027 return Changed ? &
GEP :
nullptr;
3029 }
else if (
auto *ExactIns =
3030 dyn_cast<PossiblyExactOperator>(
GEP.getOperand(1))) {
3033 if (ExactIns->isExact()) {
3041 GEP.getPointerOperand(), V,
3042 GEP.getNoWrapFlags());
3045 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3051 std::optional<APInt> NewC;
3071 if (NewC.has_value()) {
3074 ConstantInt::get(V->getType(), *NewC));
3075 cast<BinaryOperator>(NewOp)->setIsExact();
3077 GEP.getPointerOperand(), NewOp,
3078 GEP.getNoWrapFlags());
3088 if (
GEP.getNumIndices() == 1) {
3091 auto CanPreserveInBounds = [&](
bool AddIsNSW,
Value *Idx1,
Value *Idx2) {
3106 bool IsInBounds = CanPreserveInBounds(
3107 cast<OverflowingBinaryOperator>(
GEP.getOperand(1))->hasNoSignedWrap(),
3111 Idx1,
"", IsInBounds);
3125 bool IsInBounds = CanPreserveInBounds(
3128 GEP.getSourceElementType(),
GEP.getPointerOperand(),
3139 if (!
GEP.isInBounds()) {
3142 APInt BasePtrOffset(IdxWidth, 0);
3143 Value *UnderlyingPtrOp =
3146 bool CanBeNull, CanBeFreed;
3148 DL, CanBeNull, CanBeFreed);
3149 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3150 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
3152 APInt AllocSize(IdxWidth, DerefBytes);
3153 if (BasePtrOffset.
ule(AllocSize)) {
3155 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
3162 if (
GEP.hasNoUnsignedSignedWrap() && !
GEP.hasNoUnsignedWrap() &&
3164 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3178 if (isa<ConstantPointerNull>(V))
3180 if (
auto *LI = dyn_cast<LoadInst>(V))
3181 return isa<GlobalVariable>(LI->getPointerOperand());
3205 return Dest && Dest->Ptr == UsedV;
3219 switch (
I->getOpcode()) {
3224 case Instruction::AddrSpaceCast:
3225 case Instruction::BitCast:
3226 case Instruction::GetElementPtr:
3231 case Instruction::ICmp: {
3238 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3245 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3249 const APInt *Alignment;
3251 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3255 auto *CB = dyn_cast<CallBase>(AI);
3257 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3258 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3259 !AlignmentAndSizeKnownValid(CB))
3265 case Instruction::Call:
3268 switch (
II->getIntrinsicID()) {
3272 case Intrinsic::memmove:
3273 case Intrinsic::memcpy:
3274 case Intrinsic::memset: {
3276 if (
MI->isVolatile() ||
MI->getRawDest() != PI)
3280 case Intrinsic::assume:
3281 case Intrinsic::invariant_start:
3282 case Intrinsic::invariant_end:
3283 case Intrinsic::lifetime_start:
3284 case Intrinsic::lifetime_end:
3285 case Intrinsic::objectsize:
3288 case Intrinsic::launder_invariant_group:
3289 case Intrinsic::strip_invariant_group:
3318 case Instruction::Store: {
3320 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3328 }
while (!Worklist.
empty());
3351 std::unique_ptr<DIBuilder> DIB;
3352 if (isa<AllocaInst>(
MI)) {
3358 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3367 if (
II->getIntrinsicID() == Intrinsic::objectsize) {
3370 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3371 for (
Instruction *Inserted : InsertedInstructions)
3379 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3388 C->isFalseWhenEqual()));
3389 }
else if (
auto *SI = dyn_cast<StoreInst>(
I)) {
3390 for (
auto *DVI : DVIs)
3391 if (DVI->isAddressOfVariable())
3393 for (
auto *DVR : DVRs)
3394 if (DVR->isAddressOfVariable())
3437 for (
auto *DVI : DVIs)
3438 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3439 DVI->eraseFromParent();
3440 for (
auto *DVR : DVRs)
3441 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3442 DVR->eraseFromParent();
3488 if (FreeInstrBB->
size() != 2) {
3490 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3492 auto *Cast = dyn_cast<CastInst>(&Inst);
3493 if (!Cast || !Cast->isNoopCast(
DL))
3514 "Broken CFG: missing edge from predecessor to successor");
3519 if (&Instr == FreeInstrBBTerminator)
3524 "Only the branch instruction should remain");
3535 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
3536 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3537 if (Dereferenceable.
isValid()) {
3539 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
3540 Attribute::Dereferenceable);
3541 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
3550 if (isa<UndefValue>(
Op)) {
3558 if (isa<ConstantPointerNull>(
Op))
3594 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3595 if (ReturnClass ==
fcNone)
3612 bool Changed =
false;
3613 while (
Instruction *Prev =
I.getPrevNonDebugInstruction()) {
3618 if (Prev->isEHPad())
3649 return BBI->isDebugOrPseudoInst() ||
3650 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3655 if (BBI != FirstInstr)
3657 }
while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3659 return dyn_cast<StoreInst>(BBI);
3671 if (!
DeadEdges.insert({From, To}).second)
3676 for (
Use &U : PN.incoming_values())
3677 if (PN.getIncomingBlock(U) ==
From && !isa<PoisonValue>(U)) {
3693 std::next(
I->getReverseIterator())))) {
3694 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3698 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3701 Inst.dropDbgRecords();
3709 for (
Value *V : Changed)
3736 if (Succ == LiveSucc)
3764 if (isa<SelectInst>(
Cond) &&
3785 auto *Cmp = cast<CmpInst>(
Cond);
3794 if (isa<UndefValue>(
Cond)) {
3798 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3833 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3834 auto *
C = dyn_cast<ConstantInt>(
Select->getOperand(CstOpIdx));
3838 BasicBlock *CstBB = SI.findCaseValue(
C)->getCaseSuccessor();
3839 if (CstBB != SI.getDefaultDest())
3852 for (
auto Case : SI.cases())
3853 if (!CR.
contains(Case.getCaseValue()->getValue()))
3865 for (
auto Case : SI.cases()) {
3867 assert(isa<ConstantInt>(NewCase) &&
3868 "Result of expression should be constant");
3869 Case.setValue(cast<ConstantInt>(NewCase));
3877 for (
auto Case : SI.cases()) {
3879 assert(isa<ConstantInt>(NewCase) &&
3880 "Result of expression should be constant");
3881 Case.setValue(cast<ConstantInt>(NewCase));
3889 all_of(SI.cases(), [&](
const auto &Case) {
3890 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3896 Value *NewCond = Op0;
3903 for (
auto Case : SI.cases()) {
3904 const APInt &CaseVal = Case.getCaseValue()->getValue();
3906 : CaseVal.
lshr(ShiftAmt);
3907 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3915 bool IsZExt = isa<ZExtInst>(
Cond);
3919 if (
all_of(SI.cases(), [&](
const auto &Case) {
3920 const APInt &CaseVal = Case.getCaseValue()->getValue();
3921 return IsZExt ? CaseVal.isIntN(NewWidth)
3922 : CaseVal.isSignedIntN(NewWidth);
3924 for (
auto &Case : SI.cases()) {
3925 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3926 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3933 if (
auto *
Select = dyn_cast<SelectInst>(
Cond)) {
3948 for (
const auto &
C : SI.cases()) {
3950 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
3952 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
3955 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3961 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
3962 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
3967 for (
auto Case : SI.cases()) {
3968 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3969 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3974 if (isa<UndefValue>(
Cond)) {
3978 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3980 SI.findCaseValue(CI)->getCaseSuccessor());
3994 const APInt *
C =
nullptr;
3996 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3997 OvID == Intrinsic::umul_with_overflow)) {
4002 if (
C->isPowerOf2()) {
4003 return BinaryOperator::CreateShl(
4005 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
4013 if (!WO->hasOneUse())
4027 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
4030 if (OvID == Intrinsic::usub_with_overflow)
4035 if (OvID == Intrinsic::smul_with_overflow &&
4036 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4037 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4040 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4041 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4046 ConstantInt::get(WO->getLHS()->getType(),
4057 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
4062 auto *OpTy = WO->getRHS()->getType();
4063 auto *NewLHS = WO->getLHS();
4067 ConstantInt::get(OpTy, NewRHSC));
4084 const APFloat *ConstVal =
nullptr;
4085 Value *VarOp =
nullptr;
4086 bool ConstIsTrue =
false;
4093 ConstIsTrue =
false;
4109 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4112 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4113 ConstIsTrue ? NewEV : ConstantMantissa,
SelectInst,
"select.frexp");
4127 if (
match(&EV, m_ExtractValue<0>(m_Intrinsic<Intrinsic::frexp>(
m_Select(
4130 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4137 const unsigned *exti, *exte, *insi, *inse;
4138 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
4139 exte = EV.
idx_end(), inse =
IV->idx_end();
4140 exti != exte && insi != inse;
4154 if (exti == exte && insi == inse)
4187 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4190 if (
LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4192 if (
auto *STy = dyn_cast<StructType>(Agg->
getType());
4193 STy && STy->isScalableTy())
4201 if (L->isSimple() && L->hasOneUse()) {
4213 L->getPointerOperand(), Indices);
4224 if (
auto *PN = dyn_cast<PHINode>(Agg))
4230 if (
auto *SI = dyn_cast<SelectInst>(Agg))
4247 switch (Personality) {
4277 cast<ArrayType>(
LHS->
getType())->getNumElements()
4279 cast<ArrayType>(
RHS->
getType())->getNumElements();
4291 bool MakeNewInstruction =
false;
4297 bool isLastClause = i + 1 == e;
4305 if (AlreadyCaught.
insert(TypeInfo).second) {
4310 MakeNewInstruction =
true;
4317 MakeNewInstruction =
true;
4318 CleanupFlag =
false;
4337 if (!NumTypeInfos) {
4340 MakeNewInstruction =
true;
4341 CleanupFlag =
false;
4345 bool MakeNewFilter =
false;
4347 if (isa<ConstantAggregateZero>(FilterClause)) {
4349 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4355 MakeNewInstruction =
true;
4362 if (NumTypeInfos > 1)
4363 MakeNewFilter =
true;
4367 NewFilterElts.
reserve(NumTypeInfos);
4372 bool SawCatchAll =
false;
4373 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
4401 if (SeenInFilter.
insert(TypeInfo).second)
4402 NewFilterElts.
push_back(cast<Constant>(Elt));
4407 MakeNewInstruction =
true;
4412 if (NewFilterElts.
size() < NumTypeInfos)
4413 MakeNewFilter =
true;
4415 if (MakeNewFilter) {
4417 NewFilterElts.
size());
4419 MakeNewInstruction =
true;
4428 if (MakeNewFilter && !NewFilterElts.
size()) {
4429 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
4430 CleanupFlag =
false;
4441 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
4444 for (j = i; j != e; ++j)
4445 if (!isa<ArrayType>(NewClauses[j]->
getType()))
4451 for (
unsigned k = i; k + 1 < j; ++k)
4455 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
4457 MakeNewInstruction =
true;
4476 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
4486 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
4487 Value *LFilter = NewClauses[j];
4498 NewClauses.
erase(J);
4499 MakeNewInstruction =
true;
4509 if (isa<ConstantAggregateZero>(LFilter)) {
4512 if (isa<ConstantAggregateZero>(
Filter)) {
4513 assert(FElts <= LElts &&
"Should have handled this case earlier!");
4515 NewClauses.
erase(J);
4516 MakeNewInstruction =
true;
4522 if (isa<ConstantAggregateZero>(
Filter)) {
4525 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
4526 for (
unsigned l = 0; l != LElts; ++l)
4529 NewClauses.
erase(J);
4530 MakeNewInstruction =
true;
4541 bool AllFound =
true;
4542 for (
unsigned f = 0; f != FElts; ++f) {
4545 for (
unsigned l = 0; l != LElts; ++l) {
4547 if (LTypeInfo == FTypeInfo) {
4557 NewClauses.
erase(J);
4558 MakeNewInstruction =
true;
4566 if (MakeNewInstruction) {
4574 if (NewClauses.empty())
4583 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
4608 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4613 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4627 Use *MaybePoisonOperand =
nullptr;
4628 for (
Use &U : OrigOpInst->operands()) {
4629 if (isa<MetadataAsValue>(U.get()) ||
4632 if (!MaybePoisonOperand)
4633 MaybePoisonOperand = &U;
4638 OrigOpInst->dropPoisonGeneratingAnnotations();
4641 if (!MaybePoisonOperand)
4646 MaybePoisonOperand->get(), MaybePoisonOperand->get()->
getName() +
".fr");
4648 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4659 Use *StartU =
nullptr;
4677 Value *StartV = StartU->get();
4689 if (!Visited.
insert(V).second)
4692 if (Visited.
size() > 32)
4709 I->dropPoisonGeneratingAnnotations();
4711 if (StartNeedsFreeze) {
4723 if (isa<Constant>(
Op) ||
Op->hasOneUse())
4732 if (isa<Argument>(
Op)) {
4736 auto MoveBeforeOpt = cast<Instruction>(
Op)->getInsertionPointAfterDef();
4739 MoveBefore = *MoveBeforeOpt;
4743 if (isa<DbgInfoIntrinsic>(MoveBefore))
4744 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4747 MoveBefore.setHeadBit(
false);
4749 bool Changed =
false;
4750 if (&FI != &*MoveBefore) {
4751 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
4755 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
4757 Changed |= Dominates;
4766 for (
auto *U : V->users()) {
4767 if (isa<ShuffleVectorInst>(U))
4776 Value *Op0 =
I.getOperand(0);
4782 if (
auto *PN = dyn_cast<PHINode>(Op0)) {
4805 auto getUndefReplacement = [&
I](
Type *Ty) {
4808 for (
const auto *U :
I.users()) {
4817 else if (BestValue !=
C)
4818 BestValue = NullValue;
4820 assert(BestValue &&
"Must have at least one use");
4835 Constant *ReplaceC = getUndefReplacement(
I.getType()->getScalarType());
4850 auto *CB = dyn_cast<CallBase>(
I);
4869 for (
const User *U :
I.users()) {
4870 if (Visited.
insert(U).second)
4875 while (!AllocaUsers.
empty()) {
4876 auto *UserI = cast<Instruction>(AllocaUsers.
pop_back_val());
4877 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4898 if (isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
4906 if (isa<AllocaInst>(
I))
4914 if (
auto *CI = dyn_cast<CallInst>(
I)) {
4915 if (CI->isConvergent())
4921 if (
I->mayWriteToMemory()) {
4928 if (
I->mayReadFromMemory() &&
4929 !
I->hasMetadata(LLVMContext::MD_invariant_load)) {
4936 E =
I->getParent()->end();
4938 if (Scan->mayWriteToMemory())
4942 I->dropDroppableUses([&](
const Use *U) {
4943 auto *
I = dyn_cast<Instruction>(U->getUser());
4944 if (
I &&
I->getParent() != DestBlock) {
4954 I->moveBefore(*DestBlock, InsertPos);
4965 if (!DbgUsers.
empty())
4967 if (!DbgVariableRecords.
empty())
4969 DbgVariableRecords);
4989 for (
auto &DbgUser : DbgUsers)
4990 if (DbgUser->getParent() != DestBlock)
4997 if (DVI->getParent() == SrcBlock)
5000 [](
auto *
A,
auto *
B) {
return B->comesBefore(
A); });
5004 for (
auto *
User : DbgUsersToSink) {
5009 if (isa<DbgDeclareInst>(
User))
5014 User->getDebugLoc()->getInlinedAt());
5016 if (!SunkVariables.
insert(DbgUserVariable).second)
5021 if (isa<DbgAssignIntrinsic>(
User))
5024 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(
User->clone()));
5025 if (isa<DbgDeclareInst>(
User) && isa<CastInst>(
I))
5026 DIIClones.back()->replaceVariableLocationOp(
I,
I->getOperand(0));
5031 if (!DIIClones.empty()) {
5036 DIIClone->insertBefore(InsertPos);
5051 for (
auto &DVR : DbgVariableRecords)
5052 if (DVR->getParent() != DestBlock)
5053 DbgVariableRecordsToSalvage.
push_back(DVR);
5059 if (DVR->getParent() == SrcBlock)
5060 DbgVariableRecordsToSink.
push_back(DVR);
5067 return B->getInstruction()->comesBefore(
A->getInstruction());
5074 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5076 if (DbgVariableRecordsToSink.
size() > 1) {
5082 DVR->getDebugLoc()->getInlinedAt());
5083 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5089 for (
auto It : CountMap) {
5090 if (It.second > 1) {
5091 FilterOutMap[It.first] =
nullptr;
5092 DupSet.
insert(It.first.first);
5103 DVR.getDebugLoc()->getInlinedAt());
5105 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
5106 if (FilterIt == FilterOutMap.
end())
5108 if (FilterIt->second !=
nullptr)
5110 FilterIt->second = &DVR;
5125 DVR->getDebugLoc()->getInlinedAt());
5129 if (!FilterOutMap.
empty()) {
5130 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5131 auto It = FilterOutMap.
find(IVP);
5134 if (It != FilterOutMap.
end() && It->second != DVR)
5138 if (!SunkVariables.
insert(DbgUserVariable).second)
5141 if (DVR->isDbgAssign())
5149 if (DVRClones.
empty())
5163 assert(InsertPos.getHeadBit());
5165 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5189 if (
I ==
nullptr)
continue;
5204 auto getOptionalSinkBlockForInst =
5205 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
5207 return std::nullopt;
5211 unsigned NumUsers = 0;
5213 for (
Use &U :
I->uses()) {
5218 return std::nullopt;
5223 if (
PHINode *PN = dyn_cast<PHINode>(UserInst))
5224 UserBB = PN->getIncomingBlock(U);
5228 if (UserParent && UserParent != UserBB)
5229 return std::nullopt;
5230 UserParent = UserBB;
5234 if (NumUsers == 0) {
5238 return std::nullopt;
5250 return std::nullopt;
5260 return std::nullopt;
5265 auto OptBB = getOptionalSinkBlockForInst(
I);
5267 auto *UserParent = *OptBB;
5275 for (
Use &U :
I->operands())
5276 if (
Instruction *OpI = dyn_cast<Instruction>(U.get()))
5284 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5297 <<
" New = " << *Result <<
'\n');
5302 if (!Result->getDebugLoc())
5303 Result->setDebugLoc(
I->getDebugLoc());
5305 Result->copyMetadata(*
I, LLVMContext::MD_annotation);
5307 I->replaceAllUsesWith(Result);
5310 Result->takeName(
I);
5317 if (isa<PHINode>(Result) != isa<PHINode>(
I)) {
5319 if (isa<PHINode>(
I))
5325 Result->insertInto(InstParent, InsertPos);
5334 <<
" New = " << *
I <<
'\n');
5366 if (!
I->hasMetadataOtherThanDebugLoc())
5369 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5370 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5371 if (!MDScopeList || !Container.insert(MDScopeList).second)
5373 for (
const auto &
MDOperand : MDScopeList->operands())
5374 if (
auto *MDScope = dyn_cast<MDNode>(
MDOperand))
5375 Container.insert(MDScope);
5378 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5379 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5388 "llvm.experimental.noalias.scope.decl in use ?");
5391 "llvm.experimental.noalias.scope should refer to a single scope");
5393 if (
auto *MD = dyn_cast<MDNode>(
MDOperand))
5394 return !UsedAliasScopesAndLists.
contains(MD) ||
5395 !UsedNoAliasScopesAndLists.
contains(MD);
5419 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
5420 for (
PHINode &PN : Succ->phis())
5421 for (
Use &U : PN.incoming_values())
5422 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5432 HandleOnlyLiveSuccessor(BB,
nullptr);
5439 if (!Inst.use_empty() &&
5440 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5444 Inst.replaceAllUsesWith(
C);
5447 Inst.eraseFromParent();
5453 for (
Use &U : Inst.operands()) {
5454 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5457 auto *
C = cast<Constant>(U);
5458 Constant *&FoldRes = FoldedConstants[
C];
5464 <<
"\n Old = " << *
C
5465 <<
"\n New = " << *FoldRes <<
'\n');
5474 if (!Inst.isDebugOrPseudoInst()) {
5475 InstrsForInstructionWorklist.
push_back(&Inst);
5476 SeenAliasScopes.
analyse(&Inst);
5484 if (isa<UndefValue>(BI->getCondition())) {
5486 HandleOnlyLiveSuccessor(BB,
nullptr);
5489 if (
auto *
Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5490 bool CondVal =
Cond->getZExtValue();
5491 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5494 }
else if (
SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5495 if (isa<UndefValue>(SI->getCondition())) {
5497 HandleOnlyLiveSuccessor(BB,
nullptr);
5500 if (
auto *
Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5501 HandleOnlyLiveSuccessor(BB,
5502 SI->findCaseValue(
Cond)->getCaseSuccessor());
5512 if (LiveBlocks.
count(&BB))
5515 unsigned NumDeadInstInBB;
5516 unsigned NumDeadDbgInstInBB;
5517 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5520 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5521 NumDeadInst += NumDeadInstInBB;
5538 Inst->eraseFromParent();
5567 auto &
DL =
F.getDataLayout();
5569 !
F.hasFnAttribute(
"instcombine-no-verify-fixpoint");
5577 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
5585 bool MadeIRChange =
false;
5590 unsigned Iteration = 0;
5596 <<
" on " <<
F.getName()
5597 <<
" reached; stopping without verifying fixpoint\n");
5601 ++NumWorklistIterations;
5602 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
5603 <<
F.getName() <<
"\n");
5606 ORE, BFI, BPI, PSI,
DL, RPOT);
5609 MadeChangeInThisIteration |= IC.
run();
5610 if (!MadeChangeInThisIteration)
5613 MadeIRChange =
true;
5616 "Instruction Combining on " +
Twine(
F.getName()) +
5619 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5620 "'instcombine-no-verify-fixpoint' to suppress this error.",
5627 else if (Iteration == 2)
5629 else if (Iteration == 3)
5630 ++NumThreeIterations;
5632 ++NumFourOrMoreIterations;
5634 return MadeIRChange;
5642 OS, MapClassName2PassName);
5649char InstCombinePass::ID = 0;
5655 if (LRT.shouldSkip(&
ID))
5668 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5673 BFI, BPI, PSI, Options)) {
5675 LRT.update(&
ID,
false);
5681 LRT.update(&
ID,
true);
5708 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5709 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
5710 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
5711 auto &
TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
F);
5712 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5713 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5717 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5720 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5723 if (
auto *WrapperPass =
5724 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5725 BPI = &WrapperPass->getBPI();
5738 "Combine redundant instructions",
false,
false)
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
uint64_t getNumElements() const
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string and methods for querying it.
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents flags for the getelementptr instruction/expression.
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
GEPNoWrapFlags getNoWrapFlags() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
Instruction * removeOne()
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
Instruction * popDeferred()
void zap()
Check that the worklist is empty and nuke the backing store for the map.
void reserve(size_t Size)
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause at index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
This class represents min/max intrinsics.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
void preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
bool hasOneUser() const
Return true if there is exactly one user of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isZero() const
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
const ParentTy * getParent() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Don't use information from its non-constant operands.
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm....
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static unsigned int semanticsPrecision(const fltSemantics &)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const