110#define DEBUG_TYPE "instcombine"
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration,
"Number of functions with one iteration");
120STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
121STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
123 "Number of functions with four or more iterations");
127STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
133 "Controls which instructions are visited");
140 "instcombine-max-sink-users",
cl::init(32),
141 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
145 cl::desc(
"Maximum array size considered when doing a combine"));
157std::optional<Instruction *>
160 if (
II.getCalledFunction()->isTargetIntrinsic()) {
168 bool &KnownBitsComputed) {
170 if (
II.getCalledFunction()->isTargetIntrinsic()) {
172 *
this,
II, DemandedMask, Known, KnownBitsComputed);
183 if (
II.getCalledFunction()->isTargetIntrinsic()) {
185 *
this,
II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
203 auto *Inst = dyn_cast<Instruction>(
GEP);
209 if (Inst && !
GEP->hasAllConstantIndices() &&
210 !
GEP->getSourceElementType()->isIntegerTy(8)) {
230 Value *Sum =
nullptr;
231 Value *OneUseSum =
nullptr;
232 Value *OneUseBase =
nullptr;
240 auto *Inst = dyn_cast<Instruction>(
GEP);
241 if (RewriteGEPs && Inst)
245 if (
Offset->getType() != IdxTy)
247 cast<VectorType>(IdxTy)->getElementCount(),
Offset);
248 if (
GEP->hasOneUse()) {
253 OneUseBase =
GEP->getPointerOperand();
262 if (RewriteGEPs && Inst &&
263 !(
GEP->getSourceElementType()->isIntegerTy(8) &&
268 OneUseBase ? OneUseBase :
GEP->getPointerOperand(),
Offset,
"",
275 OneUseSum = OneUseBase =
nullptr;
279 Sum =
Add(Sum, OneUseSum);
290bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
309bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
310 unsigned ToWidth)
const {
316 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
321 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
326 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
337bool InstCombinerImpl::shouldChangeType(
Type *
From,
Type *To)
const {
343 unsigned FromWidth =
From->getPrimitiveSizeInBits();
345 return shouldChangeType(FromWidth, ToWidth);
354 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
355 if (!OBO || !OBO->hasNoSignedWrap())
358 const APInt *BVal, *CVal;
363 bool Overflow =
false;
364 switch (
I.getOpcode()) {
365 case Instruction::Add:
366 (void)BVal->
sadd_ov(*CVal, Overflow);
368 case Instruction::Sub:
369 (void)BVal->
ssub_ov(*CVal, Overflow);
371 case Instruction::Mul:
372 (void)BVal->
smul_ov(*CVal, Overflow);
382 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
383 return OBO && OBO->hasNoUnsignedWrap();
387 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
388 return OBO && OBO->hasNoSignedWrap();
397 I.clearSubclassOptionalData();
402 I.clearSubclassOptionalData();
403 I.setFastMathFlags(FMF);
412 auto *Cast = dyn_cast<CastInst>(BinOp1->
getOperand(0));
413 if (!Cast || !Cast->hasOneUse())
417 auto CastOpcode = Cast->getOpcode();
418 if (CastOpcode != Instruction::ZExt)
426 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
427 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
453 Cast->dropPoisonGeneratingFlags();
459Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
460 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
463 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
464 Type *CastTy = IntToPtr->getDestTy();
467 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
470 return PtrToInt->getOperand(0);
497 bool Changed =
false;
505 Changed = !
I.swapOperands();
507 if (
I.isCommutative()) {
508 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
518 if (
I.isAssociative()) {
541 I.setHasNoUnsignedWrap(
true);
544 I.setHasNoSignedWrap(
true);
573 if (
I.isAssociative() &&
I.isCommutative()) {
636 if (isa<FPMathOperator>(NewBO)) {
650 I.setHasNoUnsignedWrap(
true);
668 if (LOp == Instruction::And)
669 return ROp == Instruction::Or || ROp == Instruction::Xor;
672 if (LOp == Instruction::Or)
673 return ROp == Instruction::And;
677 if (LOp == Instruction::Mul)
678 return ROp == Instruction::Add || ROp == Instruction::Sub;
701 if (isa<Constant>(V))
715 assert(
Op &&
"Expected a binary operator");
716 LHS =
Op->getOperand(0);
717 RHS =
Op->getOperand(1);
718 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
723 Instruction::Shl, ConstantInt::get(
Op->getType(), 1),
C);
724 assert(
RHS &&
"Constant folding of immediate constants failed");
725 return Instruction::Mul;
730 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
733 return Instruction::AShr;
736 return Op->getOpcode();
745 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
748 Value *RetVal =
nullptr;
759 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
779 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
802 if (isa<BinaryOperator>(RetVal)) {
805 if (isa<OverflowingBinaryOperator>(&
I)) {
806 HasNSW =
I.hasNoSignedWrap();
807 HasNUW =
I.hasNoUnsignedWrap();
809 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
810 HasNSW &= LOBO->hasNoSignedWrap();
811 HasNUW &= LOBO->hasNoUnsignedWrap();
814 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
815 HasNSW &= ROBO->hasNoSignedWrap();
816 HasNUW &= ROBO->hasNoUnsignedWrap();
819 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
829 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
832 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
847 unsigned Opc =
I->getOpcode();
848 unsigned ConstIdx = 1;
855 case Instruction::Sub:
858 case Instruction::ICmp:
865 case Instruction::Or:
869 case Instruction::Add:
875 if (!
match(
I->getOperand(1 - ConstIdx),
888 if (
Opc == Instruction::ICmp && !cast<ICmpInst>(
I)->isEquality()) {
891 if (!Cmp || !Cmp->isZeroValue())
896 bool Consumes =
false;
900 assert(NotOp !=
nullptr &&
901 "Desync between isFreeToInvert and getFreelyInverted");
910 case Instruction::Sub:
913 case Instruction::Or:
914 case Instruction::Add:
917 case Instruction::ICmp:
953 auto IsValidBinOpc = [](
unsigned Opc) {
957 case Instruction::And:
958 case Instruction::Or:
959 case Instruction::Xor:
960 case Instruction::Add:
969 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
971 assert(ShOpc != Instruction::AShr);
972 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
973 ShOpc == Instruction::Shl;
976 auto GetInvShift = [](
unsigned ShOpc) {
977 assert(ShOpc != Instruction::AShr);
978 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
981 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
985 if (BinOpc1 == Instruction::And)
990 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
996 if (BinOpc2 == Instruction::And)
1007 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
1009 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
1010 if (!
match(
I.getOperand(ShOpnum),
1013 if (!
match(
I.getOperand(1 - ShOpnum),
1020 auto *IY = dyn_cast<Instruction>(
I.getOperand(ShOpnum));
1021 auto *IX = dyn_cast<Instruction>(ShiftedX);
1026 unsigned ShOpc = IY->getOpcode();
1027 if (ShOpc != IX->getOpcode())
1031 auto *BO2 = dyn_cast<Instruction>(
I.getOperand(1 - ShOpnum));
1035 unsigned BinOpc = BO2->getOpcode();
1037 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
1040 if (ShOpc == Instruction::AShr) {
1054 if (BinOpc ==
I.getOpcode() &&
1055 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
1070 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1084 return MatchBinOp(1);
1102 Value *
A, *CondVal, *TrueVal, *FalseVal;
1105 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1107 A->getType()->getScalarSizeInBits() == 1 &&
1114 if (MatchSelectAndCast(
LHS,
RHS))
1116 else if (MatchSelectAndCast(
RHS,
LHS))
1121 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1122 bool IsCastOpRHS = (CastOp ==
RHS);
1123 bool IsZExt = isa<ZExtInst>(CastOp);
1128 }
else if (IsZExt) {
1129 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1142 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1144 NewFoldedConst(
true, FalseVal));
1148 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1150 NewFoldedConst(
false, FalseVal));
1171 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1291static std::optional<std::pair<Value *, Value *>>
1293 if (
LHS->getParent() !=
RHS->getParent())
1294 return std::nullopt;
1296 if (
LHS->getNumIncomingValues() < 2)
1297 return std::nullopt;
1300 return std::nullopt;
1302 Value *L0 =
LHS->getIncomingValue(0);
1303 Value *R0 =
RHS->getIncomingValue(0);
1305 for (
unsigned I = 1, E =
LHS->getNumIncomingValues();
I != E; ++
I) {
1309 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1312 return std::nullopt;
1315 return std::optional(std::pair(L0, R0));
1318std::optional<std::pair<Value *, Value *>>
1319InstCombinerImpl::matchSymmetricPair(
Value *LHS,
Value *RHS) {
1320 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1321 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1323 return std::nullopt;
1325 case Instruction::PHI:
1327 case Instruction::Select: {
1333 return std::pair(TrueVal, FalseVal);
1334 return std::nullopt;
1336 case Instruction::Call: {
1340 if (LHSMinMax && RHSMinMax &&
1347 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1348 return std::nullopt;
1351 return std::nullopt;
1361 if (!LHSIsSelect && !RHSIsSelect)
1366 if (isa<FPMathOperator>(&
I)) {
1367 FMF =
I.getFastMathFlags();
1374 Value *
Cond, *True =
nullptr, *False =
nullptr;
1382 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1397 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1406 else if (True && !False)
1414 if (
Value *NewSel = foldAddNegate(
B,
C,
RHS))
1421 if (
Value *NewSel = foldAddNegate(E,
F,
LHS))
1425 if (!True || !False)
1436 assert(!isa<Constant>(
I) &&
"Shouldn't invert users of constant");
1438 if (U == IgnoredUser)
1440 switch (cast<Instruction>(U)->
getOpcode()) {
1441 case Instruction::Select: {
1442 auto *SI = cast<SelectInst>(U);
1444 SI->swapProfMetadata();
1447 case Instruction::Br: {
1454 case Instruction::Xor:
1461 "canFreelyInvertAllUsersOf() ?");
1471 for (
unsigned Idx = 0,
End = DbgVal->getNumVariableLocationOps();
1473 if (DbgVal->getVariableLocationOp(
Idx) ==
I)
1474 DbgVal->setExpression(
1481Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1491 if (
C->getType()->getElementType()->isIntegerTy())
1495 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1500 if (isa<UndefValue>(Elt))
1503 if (!isa<ConstantInt>(Elt))
1510 if (
auto *CV = dyn_cast<Constant>(V))
1511 if (CV->getType()->isVectorTy() &&
1512 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1525Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1526 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1530 Type *IntTy = IntOps[0]->getType();
1535 unsigned MaxRepresentableBits =
1540 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1544 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1545 if (OpsKnown[OpNo].hasKnownBits() &&
1546 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1551 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1555 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1559 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1561 if (OpsFromSigned != isa<SIToFPInst>(BO.
getOperand(OpNo)) &&
1570 if (MaxRepresentableBits < IntSz) {
1580 NumUsedLeadingBits[OpNo] =
1581 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1589 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1592 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1597 if (Op1FpC !=
nullptr) {
1599 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1604 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1606 if (Op1IntC ==
nullptr)
1609 : Instruction::UIToFP,
1610 Op1IntC, FPTy,
DL) != Op1FpC)
1614 IntOps[1] = Op1IntC;
1618 if (IntTy != IntOps[1]->
getType())
1621 if (Op1FpC ==
nullptr) {
1622 if (!IsValidPromotion(1))
1625 if (!IsValidPromotion(0))
1631 bool NeedsOverflowCheck =
true;
1634 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1635 unsigned OverflowMaxCurBits =
1636 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1637 bool OutputSigned = OpsFromSigned;
1639 case Instruction::FAdd:
1640 IntOpc = Instruction::Add;
1641 OverflowMaxOutputBits += OverflowMaxCurBits;
1643 case Instruction::FSub:
1644 IntOpc = Instruction::Sub;
1645 OverflowMaxOutputBits += OverflowMaxCurBits;
1647 case Instruction::FMul:
1648 IntOpc = Instruction::Mul;
1649 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1655 if (OverflowMaxOutputBits < IntSz) {
1656 NeedsOverflowCheck =
false;
1659 if (IntOpc == Instruction::Sub)
1660 OutputSigned =
true;
1666 if (NeedsOverflowCheck &&
1667 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1671 if (
auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1672 IntBO->setHasNoSignedWrap(OutputSigned);
1673 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1686 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1706 if (
Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1707 IntOps, Op1FpC, OpsKnown))
1709 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1725 !
X->getType()->isIntOrIntVectorTy(1))
1742 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1743 }
else if (
match(SI->getCondition(),
1768 bool FoldWithMultiUse) {
1770 if (!SI->hasOneUse() && !FoldWithMultiUse)
1773 Value *TV = SI->getTrueValue();
1774 Value *FV = SI->getFalseValue();
1777 if (SI->getType()->isIntOrIntVectorTy(1))
1782 if (isa<MinMaxIntrinsic>(&
Op))
1783 for (
Value *IntrinOp :
Op.operands())
1784 if (
auto *PN = dyn_cast<PHINode>(IntrinOp))
1785 for (
Value *PhiOp : PN->operands())
1796 if (
auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1797 if (CI->hasOneUse()) {
1798 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1799 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1800 !CI->isCommutative())
1809 if (!NewTV && !NewFV)
1846 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&
I);
1861 bool AllowMultipleUses) {
1863 if (NumPHIValues == 0)
1870 bool IdenticalUsers =
false;
1871 if (!AllowMultipleUses && !OneUse) {
1875 if (UI != &
I && !
I.isIdenticalTo(UI))
1879 IdenticalUsers =
true;
1888 auto *
I = dyn_cast<Instruction>(
Op);
1893 if (isa<PHINode>(
I))
1909 bool SeenNonSimplifiedInVal =
false;
1910 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1921 auto WillFold = [&]() {
1926 const APInt *Ignored;
1927 if (isa<CmpIntrinsic>(InVal) &&
1932 if (isa<ZExtInst>(InVal) &&
1933 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1947 if (!OneUse && !IdenticalUsers)
1950 if (SeenNonSimplifiedInVal)
1952 SeenNonSimplifiedInVal =
true;
1968 if (isa<InvokeInst>(InVal))
1969 if (cast<Instruction>(InVal)->
getParent() == InBB)
1982 for (
auto OpIndex : OpsToMoveUseToIncomingBB) {
1993 U = U->DoPHITranslation(PN->
getParent(), OpBB);
1996 Clones.
insert({OpBB, Clone});
2001 NewPhiValues[
OpIndex] = Clone;
2010 for (
unsigned i = 0; i != NumPHIValues; ++i)
2013 if (IdenticalUsers) {
2041 auto *BO0 = dyn_cast<BinaryOperator>(BO.
getOperand(0));
2042 auto *BO1 = dyn_cast<BinaryOperator>(BO.
getOperand(1));
2044 BO0->getOpcode() !=
Opc || BO1->getOpcode() !=
Opc ||
2045 !BO0->isAssociative() || !BO1->isAssociative() ||
2046 BO0->getParent() != BO1->getParent())
2050 "Expected commutative instructions!");
2054 Value *Start0, *Step0, *Start1, *Step1;
2061 "Expected PHIs with two incoming values!");
2064 auto *Init0 = dyn_cast<Constant>(Start0);
2065 auto *Init1 = dyn_cast<Constant>(Start1);
2066 auto *C0 = dyn_cast<Constant>(Step0);
2067 auto *C1 = dyn_cast<Constant>(Step1);
2068 if (!Init0 || !Init1 || !C0 || !C1)
2083 if (
Opc == Instruction::FAdd ||
Opc == Instruction::FMul) {
2087 NewBO->setFastMathFlags(Intersect);
2090 Flags.AllKnownNonNegative =
false;
2091 Flags.AllKnownNonZero =
false;
2092 Flags.mergeFlags(*BO0);
2093 Flags.mergeFlags(*BO1);
2094 Flags.mergeFlags(BO);
2095 Flags.applyFlags(*NewBO);
2097 NewBO->takeName(&BO);
2107 "Invalid incoming block!");
2108 NewPN->addIncoming(
Init, BB);
2109 }
else if (V == BO0) {
2114 "Invalid incoming block!");
2115 NewPN->addIncoming(NewBO, BB);
2121 <<
"\n with " << *PN1 <<
"\n " << *BO1
2146 auto *Phi0 = dyn_cast<PHINode>(BO.
getOperand(0));
2147 auto *Phi1 = dyn_cast<PHINode>(BO.
getOperand(1));
2148 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2149 Phi0->getNumOperands() != Phi1->getNumOperands())
2153 if (BO.
getParent() != Phi0->getParent() ||
2170 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
2171 auto &Phi0Use = std::get<0>(
T);
2172 auto &Phi1Use = std::get<1>(
T);
2173 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2175 Value *Phi0UseV = Phi0Use.get();
2176 Value *Phi1UseV = Phi1Use.get();
2179 else if (Phi1UseV ==
C)
2186 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
2187 CanFoldIncomingValuePair)) {
2190 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
2191 "The number of collected incoming values should equal the number "
2192 "of the original PHINode operands!");
2193 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
2194 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
2199 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2206 ConstBB = Phi0->getIncomingBlock(0);
2207 OtherBB = Phi0->getIncomingBlock(1);
2209 ConstBB = Phi0->getIncomingBlock(1);
2210 OtherBB = Phi0->getIncomingBlock(0);
2220 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->
getTerminator());
2221 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2228 for (
auto BBIter = BO.
getParent()->begin(); &*BBIter != &BO; ++BBIter)
2241 Phi0->getIncomingValueForBlock(OtherBB),
2242 Phi1->getIncomingValueForBlock(OtherBB));
2243 if (
auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2244 NotFoldedNewBO->copyIRFlags(&BO);
2254 if (!isa<Constant>(
I.getOperand(1)))
2257 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
2260 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
2271 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2285 if (isa<ScalableVectorType>(NewCTy)) {
2293 cast<FixedVectorType>(
C->getType())->getNumElements())
2296 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2299 unsigned NumElts = cast<FixedVectorType>(
C->getType())->getNumElements();
2300 for (
unsigned I = 0;
I < NumElts; ++
I) {
2302 if (ShMask[
I] >= 0) {
2303 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2311 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2314 NewVecC[ShMask[
I]] = CElt;
2321 if (!isa<VectorType>(Inst.
getType()))
2327 cast<VectorType>(Inst.
getType())->getElementCount());
2329 cast<VectorType>(Inst.
getType())->getElementCount());
2334 Value *L0, *L1, *R0, *R1;
2339 cast<ShuffleVectorInst>(
LHS)->isConcat() &&
2340 cast<ShuffleVectorInst>(
RHS)->isConcat()) {
2347 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2350 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2357 if (
auto *BO = dyn_cast<BinaryOperator>(V))
2361 M, Intrinsic::vector_reverse, V->getType());
2374 return createBinOpReverse(V1, V2);
2378 return createBinOpReverse(V1,
RHS);
2382 return createBinOpReverse(
LHS, V2);
2386 if (
auto *BO = dyn_cast<BinaryOperator>(V))
2389 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2393 M, Intrinsic::experimental_vp_reverse, V->getType());
2398 if (
match(
LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2401 if (
match(
RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2405 return createBinOpVPReverse(V1, V2, EVL);
2409 return createBinOpVPReverse(V1,
RHS, EVL);
2413 match(
RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2415 return createBinOpVPReverse(
LHS, V2, EVL);
2425 if (
auto *BO = dyn_cast<BinaryOperator>(XY))
2437 return createBinOpShuffle(V1, V2, Mask);
2446 auto *LShuf = cast<ShuffleVectorInst>(
LHS);
2447 auto *RShuf = cast<ShuffleVectorInst>(
RHS);
2452 if (LShuf->isSelect() &&
2454 RShuf->isSelect() &&
2476 "Shuffle should not change scalar type");
2478 bool ConstOp1 = isa<Constant>(
RHS);
2488 Value *NewLHS = ConstOp1 ? V1 : NewC;
2489 Value *NewRHS = ConstOp1 ? NewC : V1;
2490 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2497 if (isa<ShuffleVectorInst>(
RHS))
2530 if (isa<FPMathOperator>(R)) {
2531 R->copyFastMathFlags(&Inst);
2534 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2535 NewInstBO->copyIRFlags(R);
2564 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2565 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2583 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
2589 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2591 NewBinOp->setHasNoSignedWrap();
2593 NewBinOp->setHasNoUnsignedWrap();
2609 if (!
GEP.hasAllConstantIndices())
2625 Type *Ty =
GEP.getSourceElementType();
2627 Value *NewFalseC = Builder.
CreateGEP(Ty, FalseC, IndexC,
"", NW);
2637 if (
GEP.getNumIndices() != 1)
2646 Type *PtrTy = Src->getType()->getScalarType();
2647 unsigned IndexSizeInBits =
DL.getIndexTypeSizeInBits(PtrTy);
2654 if (isa<ScalableVectorType>(
BaseType))
2658 if (NewOffset.
isZero() ||
2659 (Src->hasOneUse() &&
GEP.getOperand(1)->hasOneUse())) {
2661 if (
GEP.hasNoUnsignedWrap() &&
2662 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2665 if (
GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2689 Type *PtrTy = Src->getType()->getScalarType();
2690 if (
GEP.hasAllConstantIndices() &&
2691 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2695 bool IsFirstType =
true;
2696 unsigned NumVarIndices = 0;
2697 for (
auto Pair :
enumerate(Src->indices())) {
2698 if (!isa<ConstantInt>(Pair.value())) {
2700 IsFirstType =
false;
2701 NumVarIndices = Pair.index() + 1;
2708 if (NumVarIndices != Src->getNumIndices()) {
2728 if (!
Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2733 drop_end(Src->indices(), Src->getNumIndices() - NumVarIndices));
2740 if (
Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2742 if (!
Idx.isNonNegative())
2751 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2757 bool EndsWithSequential =
false;
2760 EndsWithSequential =
I.isSequential();
2763 if (EndsWithSequential) {
2766 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2783 Indices.
append(Src->op_begin()+1, Src->op_end()-1);
2786 }
else if (isa<Constant>(*
GEP.idx_begin()) &&
2787 cast<Constant>(*
GEP.idx_begin())->isNullValue() &&
2788 Src->getNumOperands() != 1) {
2790 Indices.
append(Src->op_begin()+1, Src->op_end());
2795 unsigned NumVarIndices =
2797 if (NumVarIndices > 1)
2800 if (!Indices.
empty())
2803 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2811 bool &DoesConsume,
unsigned Depth) {
2812 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2830 if (!WillInvertAllUses)
2835 if (
auto *
I = dyn_cast<CmpInst>(V)) {
2846 DoesConsume,
Depth))
2849 DoesConsume,
Depth))
2858 DoesConsume,
Depth))
2861 DoesConsume,
Depth))
2870 DoesConsume,
Depth))
2879 DoesConsume,
Depth))
2891 bool LocalDoesConsume = DoesConsume;
2893 LocalDoesConsume,
Depth))
2896 LocalDoesConsume,
Depth)) {
2897 DoesConsume = LocalDoesConsume;
2900 DoesConsume,
Depth);
2901 assert(NotB !=
nullptr &&
2902 "Unable to build inverted value for known freely invertable op");
2903 if (
auto *
II = dyn_cast<IntrinsicInst>(V))
2912 if (
PHINode *PN = dyn_cast<PHINode>(V)) {
2913 bool LocalDoesConsume = DoesConsume;
2915 for (
Use &U : PN->operands()) {
2916 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2920 if (NewIncomingVal ==
nullptr)
2923 if (NewIncomingVal == V)
2926 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
2929 DoesConsume = LocalDoesConsume;
2935 for (
auto [Val, Pred] : IncomingValues)
2944 DoesConsume,
Depth))
2951 DoesConsume,
Depth))
2960 bool IsLogical,
Value *
A,
2962 bool LocalDoesConsume = DoesConsume;
2964 LocalDoesConsume,
Depth))
2967 LocalDoesConsume,
Depth)) {
2969 LocalDoesConsume,
Depth);
2970 DoesConsume = LocalDoesConsume;
2980 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
2984 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
2988 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
2992 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
3001 Type *GEPEltType =
GEP.getSourceElementType();
3012 if (
GEP.getNumIndices() == 1 &&
3020 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3021 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3024 return match(V, m_APInt(C)) && !C->isZero();
3030 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->
getOperand(0));
3047 auto *Op2 = dyn_cast<GetElementPtrInst>(*
I);
3048 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3049 Op1->getSourceElementType() != Op2->getSourceElementType())
3057 Type *CurTy =
nullptr;
3059 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
3060 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3063 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3072 assert(CurTy &&
"No current type?");
3092 CurTy = Op1->getSourceElementType();
3100 NW &= Op2->getNoWrapFlags();
3109 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3110 NewGEP->setNoWrapFlags(NW);
3123 NewPN = Builder.
CreatePHI(Op1->getOperand(DI)->getType(),
3128 NewPN->
addIncoming(cast<GEPOperator>(
I)->getOperand(DI),
3131 NewGEP->setOperand(DI, NewPN);
3134 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
3141 Type *GEPType =
GEP.getType();
3142 Type *GEPEltType =
GEP.getSourceElementType();
3151 if (
auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3152 auto VWidth = GEPFVTy->getNumElements();
3153 APInt PoisonElts(VWidth, 0);
3165 bool MadeChange =
false;
3169 Type *NewScalarIndexTy =
3179 Type *IndexTy = (*I)->getType();
3180 Type *NewIndexType =
3183 cast<VectorType>(IndexTy)->getElementCount())
3195 if (IndexTy != NewIndexType) {
3201 if (
GEP.hasNoUnsignedWrap() &&
GEP.hasNoUnsignedSignedWrap())
3207 GEP.hasNoUnsignedSignedWrap());
3216 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
3221 GEP.getNoWrapFlags()));
3232 auto *LastIdx = dyn_cast<Constant>(Indices.
back());
3233 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3240 auto *FirstIdx = dyn_cast<Constant>(Indices.
front());
3241 if (FirstIdx && FirstIdx->isNullValue() &&
3242 !FirstIdx->getType()->isVectorTy()) {
3247 GEP.getPointerOperand(),
3249 GEP.getNoWrapFlags()));
3256 return Op->getType()->isVectorTy() && getSplatValue(Op);
3259 for (
auto &
Op :
GEP.operands()) {
3260 if (
Op->getType()->isVectorTy())
3270 GEP.getNoWrapFlags());
3272 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3278 bool SeenVarIndex =
false;
3280 if (isa<Constant>(
Idx))
3283 if (!SeenVarIndex) {
3284 SeenVarIndex =
true;
3292 GEP.getName() +
".split",
GEP.getNoWrapFlags());
3299 BackIndices,
GEP.getNoWrapFlags());
3303 if (
auto *PN = dyn_cast<PHINode>(PtrOp)) {
3308 if (
auto *Src = dyn_cast<GEPOperator>(PtrOp))
3312 if (
GEP.getNumIndices() == 1) {
3313 unsigned AS =
GEP.getPointerAddressSpace();
3314 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3318 if (TyAllocSize == 1) {
3327 GEPType ==
Y->getType()) {
3328 bool HasSameUnderlyingObject =
3330 bool Changed =
false;
3331 GEP.replaceUsesWithIf(
Y, [&](
Use &U) {
3332 bool ShouldReplace = HasSameUnderlyingObject ||
3333 isa<ICmpInst>(U.getUser()) ||
3334 isa<PtrToIntInst>(U.getUser());
3335 Changed |= ShouldReplace;
3336 return ShouldReplace;
3338 return Changed ? &
GEP :
nullptr;
3340 }
else if (
auto *ExactIns =
3341 dyn_cast<PossiblyExactOperator>(
GEP.getOperand(1))) {
3344 if (ExactIns->isExact()) {
3352 GEP.getPointerOperand(), V,
3353 GEP.getNoWrapFlags());
3356 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3362 std::optional<APInt> NewC;
3382 if (NewC.has_value()) {
3385 ConstantInt::get(V->getType(), *NewC));
3386 cast<BinaryOperator>(NewOp)->setIsExact();
3388 GEP.getPointerOperand(), NewOp,
3389 GEP.getNoWrapFlags());
3399 if (!
GEP.isInBounds()) {
3402 APInt BasePtrOffset(IdxWidth, 0);
3403 Value *UnderlyingPtrOp =
3405 bool CanBeNull, CanBeFreed;
3407 DL, CanBeNull, CanBeFreed);
3408 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3409 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
3411 APInt AllocSize(IdxWidth, DerefBytes);
3412 if (BasePtrOffset.
ule(AllocSize)) {
3414 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
3421 if (
GEP.hasNoUnsignedSignedWrap() && !
GEP.hasNoUnsignedWrap() &&
3423 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3431 if (
GEP.getNumIndices() == 1) {
3434 auto GetPreservedNoWrapFlags = [&](
bool AddIsNUW) {
3437 if (
GEP.hasNoUnsignedWrap() && AddIsNUW)
3438 return GEP.getNoWrapFlags();
3458 NewPtr, Idx2,
"", NWFlags));
3469 bool NUW =
match(
GEP.getOperand(1),
3473 GEP.getSourceElementType(),
GEP.getPointerOperand(),
3491 if (isa<ConstantPointerNull>(V))
3493 if (
auto *LI = dyn_cast<LoadInst>(V))
3494 return isa<GlobalVariable>(LI->getPointerOperand());
3518 return Dest && Dest->Ptr == UsedV;
3521static std::optional<ModRefInfo>
3533 switch (
I->getOpcode()) {
3536 return std::nullopt;
3538 case Instruction::AddrSpaceCast:
3539 case Instruction::BitCast:
3540 case Instruction::GetElementPtr:
3545 case Instruction::ICmp: {
3551 return std::nullopt;
3552 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3554 return std::nullopt;
3559 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3563 const APInt *Alignment;
3565 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3569 auto *CB = dyn_cast<CallBase>(AI);
3571 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3572 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3573 !AlignmentAndSizeKnownValid(CB))
3574 return std::nullopt;
3579 case Instruction::Call:
3582 switch (
II->getIntrinsicID()) {
3584 return std::nullopt;
3586 case Intrinsic::memmove:
3587 case Intrinsic::memcpy:
3588 case Intrinsic::memset: {
3590 if (
MI->isVolatile())
3591 return std::nullopt;
3597 return std::nullopt;
3601 case Intrinsic::assume:
3602 case Intrinsic::invariant_start:
3603 case Intrinsic::invariant_end:
3604 case Intrinsic::lifetime_start:
3605 case Intrinsic::lifetime_end:
3606 case Intrinsic::objectsize:
3609 case Intrinsic::launder_invariant_group:
3610 case Intrinsic::strip_invariant_group:
3637 return std::nullopt;
3639 case Instruction::Store: {
3641 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3642 return std::nullopt;
3644 return std::nullopt;
3650 case Instruction::Load: {
3653 return std::nullopt;
3655 return std::nullopt;
3663 }
while (!Worklist.
empty());
3687 std::unique_ptr<DIBuilder> DIB;
3688 if (isa<AllocaInst>(
MI)) {
3695 bool KnowInitUndef =
false;
3696 bool KnowInitZero =
false;
3700 if (isa<UndefValue>(
Init))
3701 KnowInitUndef =
true;
3702 else if (
Init->isNullValue())
3703 KnowInitZero =
true;
3707 auto &
F = *
MI.getFunction();
3708 if (
F.hasFnAttribute(Attribute::SanitizeMemory) ||
3709 F.hasFnAttribute(Attribute::SanitizeAddress))
3710 KnowInitUndef =
false;
3724 if (
II->getIntrinsicID() == Intrinsic::objectsize) {
3727 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3728 for (
Instruction *Inserted : InsertedInstructions)
3735 if (
auto *MTI = dyn_cast<MemTransferInst>(
I)) {
3736 if (KnowInitZero &&
isRefSet(*Removable)) {
3742 MTI->getLength(), MTI->getDestAlign());
3743 M->copyMetadata(*MTI);
3757 C->isFalseWhenEqual()));
3758 }
else if (
auto *SI = dyn_cast<StoreInst>(
I)) {
3759 for (
auto *DVR : DVRs)
3760 if (DVR->isAddressOfVariable())
3766 if (isa<LoadInst>(
I)) {
3767 assert(KnowInitZero || KnowInitUndef);
3782 F,
II->getNormalDest(),
II->getUnwindDest(), {},
"",
II->getParent());
3783 NewII->setDebugLoc(
II->getDebugLoc());
3811 for (
auto *DVR : DVRs)
3812 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3813 DVR->eraseFromParent();
3859 if (FreeInstrBB->
size() != 2) {
3861 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3863 auto *Cast = dyn_cast<CastInst>(&Inst);
3864 if (!Cast || !Cast->isNoopCast(
DL))
3885 "Broken CFG: missing edge from predecessor to successor");
3890 if (&Instr == FreeInstrBBTerminator)
3895 "Only the branch instruction should remain");
3906 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
3907 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3908 if (Dereferenceable.
isValid()) {
3910 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
3911 Attribute::Dereferenceable);
3912 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
3921 if (isa<UndefValue>(
Op)) {
3929 if (isa<ConstantPointerNull>(
Op))
3966 if (
RetTy->isPointerTy()) {
3967 bool HasDereferenceable =
3968 F->getAttributes().getRetDereferenceableBytes() > 0;
3969 if (
F->hasRetAttribute(Attribute::NonNull) ||
3970 (HasDereferenceable &&
3972 if (
Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
3980 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3981 if (ReturnClass ==
fcNone)
3998 bool Changed =
false;
4004 if (Prev->isEHPad())
4036 if (BBI != FirstInstr)
4038 }
while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4040 return dyn_cast<StoreInst>(BBI);
4052 if (!
DeadEdges.insert({From, To}).second)
4057 for (
Use &U : PN.incoming_values())
4058 if (PN.getIncomingBlock(U) ==
From && !isa<PoisonValue>(U)) {
4074 std::next(
I->getReverseIterator())))) {
4075 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4079 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4082 Inst.dropDbgRecords();
4090 for (
Value *V : Changed)
4117 if (Succ == LiveSucc)
4145 if (isa<SelectInst>(
Cond) &&
4166 auto *Cmp = cast<CmpInst>(
Cond);
4175 if (isa<UndefValue>(
Cond)) {
4179 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
4214 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4215 auto *
C = dyn_cast<ConstantInt>(
Select->getOperand(CstOpIdx));
4219 BasicBlock *CstBB = SI.findCaseValue(
C)->getCaseSuccessor();
4220 if (CstBB != SI.getDefaultDest())
4233 for (
auto Case : SI.cases())
4234 if (!CR.
contains(Case.getCaseValue()->getValue()))
4246 for (
auto Case : SI.cases()) {
4248 assert(isa<ConstantInt>(NewCase) &&
4249 "Result of expression should be constant");
4250 Case.setValue(cast<ConstantInt>(NewCase));
4258 for (
auto Case : SI.cases()) {
4260 assert(isa<ConstantInt>(NewCase) &&
4261 "Result of expression should be constant");
4262 Case.setValue(cast<ConstantInt>(NewCase));
4270 all_of(SI.cases(), [&](
const auto &Case) {
4271 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4277 Value *NewCond = Op0;
4284 for (
auto Case : SI.cases()) {
4285 const APInt &CaseVal = Case.getCaseValue()->getValue();
4287 : CaseVal.
lshr(ShiftAmt);
4288 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4296 bool IsZExt = isa<ZExtInst>(
Cond);
4300 if (
all_of(SI.cases(), [&](
const auto &Case) {
4301 const APInt &CaseVal = Case.getCaseValue()->getValue();
4302 return IsZExt ? CaseVal.isIntN(NewWidth)
4303 : CaseVal.isSignedIntN(NewWidth);
4305 for (
auto &Case : SI.cases()) {
4306 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
4307 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4314 if (
auto *
Select = dyn_cast<SelectInst>(
Cond)) {
4329 for (
const auto &
C : SI.cases()) {
4331 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
4333 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
4336 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4342 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
4343 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
4348 for (
auto Case : SI.cases()) {
4349 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
4350 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4355 if (isa<UndefValue>(
Cond)) {
4359 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
4361 SI.findCaseValue(CI)->getCaseSuccessor());
4375 const APInt *
C =
nullptr;
4377 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4378 OvID == Intrinsic::umul_with_overflow)) {
4383 if (
C->isPowerOf2()) {
4384 return BinaryOperator::CreateShl(
4386 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
4394 if (!WO->hasOneUse())
4408 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
4411 if (OvID == Intrinsic::usub_with_overflow)
4416 if (OvID == Intrinsic::smul_with_overflow &&
4417 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4418 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4421 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4422 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4427 ConstantInt::get(WO->getLHS()->getType(),
4438 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
4443 auto *OpTy = WO->getRHS()->getType();
4444 auto *NewLHS = WO->getLHS();
4448 ConstantInt::get(OpTy, NewRHSC));
4465 const APFloat *ConstVal =
nullptr;
4466 Value *VarOp =
nullptr;
4467 bool ConstIsTrue =
false;
4474 ConstIsTrue =
false;
4490 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4493 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4494 ConstIsTrue ? NewEV : ConstantMantissa,
SelectInst,
"select.frexp");
4508 if (
match(&EV, m_ExtractValue<0>(m_Intrinsic<Intrinsic::frexp>(
m_Select(
4511 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4518 const unsigned *exti, *exte, *insi, *inse;
4519 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
4520 exte = EV.
idx_end(), inse =
IV->idx_end();
4521 exti != exte && insi != inse;
4535 if (exti == exte && insi == inse)
4568 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4571 if (
LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4573 if (
auto *STy = dyn_cast<StructType>(Agg->
getType());
4574 STy && STy->isScalableTy())
4582 if (L->isSimple() && L->hasOneUse()) {
4594 L->getPointerOperand(), Indices);
4605 if (
auto *PN = dyn_cast<PHINode>(Agg))
4611 if (
auto *SI = dyn_cast<SelectInst>(Agg))
4628 switch (Personality) {
4658 cast<ArrayType>(
LHS->
getType())->getNumElements()
4660 cast<ArrayType>(
RHS->
getType())->getNumElements();
4672 bool MakeNewInstruction =
false;
4678 bool isLastClause = i + 1 == e;
4686 if (AlreadyCaught.
insert(TypeInfo).second) {
4691 MakeNewInstruction =
true;
4698 MakeNewInstruction =
true;
4699 CleanupFlag =
false;
4718 if (!NumTypeInfos) {
4721 MakeNewInstruction =
true;
4722 CleanupFlag =
false;
4726 bool MakeNewFilter =
false;
4728 if (isa<ConstantAggregateZero>(FilterClause)) {
4730 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4736 MakeNewInstruction =
true;
4743 if (NumTypeInfos > 1)
4744 MakeNewFilter =
true;
4748 NewFilterElts.
reserve(NumTypeInfos);
4753 bool SawCatchAll =
false;
4754 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
4782 if (SeenInFilter.
insert(TypeInfo).second)
4783 NewFilterElts.
push_back(cast<Constant>(Elt));
4788 MakeNewInstruction =
true;
4793 if (NewFilterElts.
size() < NumTypeInfos)
4794 MakeNewFilter =
true;
4796 if (MakeNewFilter) {
4798 NewFilterElts.
size());
4800 MakeNewInstruction =
true;
4809 if (MakeNewFilter && !NewFilterElts.
size()) {
4810 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
4811 CleanupFlag =
false;
4822 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
4825 for (j = i; j != e; ++j)
4826 if (!isa<ArrayType>(NewClauses[j]->
getType()))
4832 for (
unsigned k = i; k + 1 < j; ++k)
4836 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
4838 MakeNewInstruction =
true;
4857 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
4867 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
4868 Value *LFilter = NewClauses[j];
4879 NewClauses.
erase(J);
4880 MakeNewInstruction =
true;
4890 if (isa<ConstantAggregateZero>(LFilter)) {
4893 if (isa<ConstantAggregateZero>(
Filter)) {
4894 assert(FElts <= LElts &&
"Should have handled this case earlier!");
4896 NewClauses.
erase(J);
4897 MakeNewInstruction =
true;
4903 if (isa<ConstantAggregateZero>(
Filter)) {
4906 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
4907 for (
unsigned l = 0; l != LElts; ++l)
4910 NewClauses.
erase(J);
4911 MakeNewInstruction =
true;
4922 bool AllFound =
true;
4923 for (
unsigned f = 0; f != FElts; ++f) {
4926 for (
unsigned l = 0; l != LElts; ++l) {
4928 if (LTypeInfo == FTypeInfo) {
4938 NewClauses.
erase(J);
4939 MakeNewInstruction =
true;
4947 if (MakeNewInstruction) {
4955 if (NewClauses.empty())
4964 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
4986 auto CanPushFreeze = [](
Value *V) {
4987 if (!isa<Instruction>(V) || isa<PHINode>(V))
5007 Value *V = U->get();
5008 if (!CanPushFreeze(V)) {
5013 auto *UserI = cast<Instruction>(U->getUser());
5020 auto *
I = cast<Instruction>(V);
5021 if (!Visited.
insert(
I).second)
5032 I->dropPoisonGeneratingAnnotations();
5033 this->Worklist.add(
I);
5036 return OrigUse->get();
5046 Use *StartU =
nullptr;
5064 Value *StartV = StartU->get();
5076 if (!Visited.
insert(V).second)
5079 if (Visited.
size() > 32)
5096 I->dropPoisonGeneratingAnnotations();
5098 if (StartNeedsFreeze) {
5110 if (isa<Constant>(
Op) ||
Op->hasOneUse())
5119 if (isa<Argument>(
Op)) {
5123 auto MoveBeforeOpt = cast<Instruction>(
Op)->getInsertionPointAfterDef();
5126 MoveBefore = *MoveBeforeOpt;
5130 MoveBefore.setHeadBit(
false);
5132 bool Changed =
false;
5133 if (&FI != &*MoveBefore) {
5134 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
5138 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
5140 Changed |= Dominates;
5149 for (
auto *U : V->users()) {
5150 if (isa<ShuffleVectorInst>(U))
5159 Value *Op0 =
I.getOperand(0);
5165 if (
auto *PN = dyn_cast<PHINode>(Op0)) {
5188 auto getUndefReplacement = [&](
Type *Ty) {
5189 Value *BestValue =
nullptr;
5191 for (
const auto *U :
I.users()) {
5192 Value *V = NullValue;
5204 else if (BestValue != V)
5205 BestValue = NullValue;
5207 assert(BestValue &&
"Must have at least one use");
5221 Type *Ty =
C->getType();
5222 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5225 unsigned NumElts = VTy->getNumElements();
5227 for (
unsigned i = 0; i != NumElts; ++i) {
5228 Constant *EltC =
C->getAggregateElement(i);
5239 !
C->containsConstantExpression()) {
5240 if (
Constant *Repl = getFreezeVectorReplacement(
C))
5255 auto *CB = dyn_cast<CallBase>(
I);
5274 for (
const User *U :
I.users()) {
5275 if (Visited.
insert(U).second)
5280 while (!AllocaUsers.
empty()) {
5281 auto *UserI = cast<Instruction>(AllocaUsers.
pop_back_val());
5282 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5303 if (isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
5311 if (isa<AllocaInst>(
I))
5319 if (
auto *CI = dyn_cast<CallInst>(
I)) {
5320 if (CI->isConvergent())
5326 if (
I->mayWriteToMemory()) {
5333 if (
I->mayReadFromMemory() &&
5334 !
I->hasMetadata(LLVMContext::MD_invariant_load)) {
5341 E =
I->getParent()->end();
5343 if (Scan->mayWriteToMemory())
5347 I->dropDroppableUses([&](
const Use *U) {
5348 auto *
I = dyn_cast<Instruction>(U->getUser());
5349 if (
I &&
I->getParent() != DestBlock) {
5359 I->moveBefore(*DestBlock, InsertPos);
5369 if (!DbgVariableRecords.
empty())
5371 DbgVariableRecords);
5394 for (
auto &DVR : DbgVariableRecords)
5395 if (DVR->getParent() != DestBlock)
5396 DbgVariableRecordsToSalvage.
push_back(DVR);
5402 if (DVR->getParent() == SrcBlock)
5403 DbgVariableRecordsToSink.
push_back(DVR);
5410 return B->getInstruction()->comesBefore(
A->getInstruction());
5417 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5419 if (DbgVariableRecordsToSink.
size() > 1) {
5425 DVR->getDebugLoc()->getInlinedAt());
5426 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5432 for (
auto It : CountMap) {
5433 if (It.second > 1) {
5434 FilterOutMap[It.first] =
nullptr;
5435 DupSet.
insert(It.first.first);
5446 DVR.getDebugLoc()->getInlinedAt());
5448 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
5449 if (FilterIt == FilterOutMap.
end())
5451 if (FilterIt->second !=
nullptr)
5453 FilterIt->second = &DVR;
5468 DVR->getDebugLoc()->getInlinedAt());
5472 if (!FilterOutMap.
empty()) {
5473 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5474 auto It = FilterOutMap.
find(IVP);
5477 if (It != FilterOutMap.
end() && It->second != DVR)
5481 if (!SunkVariables.
insert(DbgUserVariable).second)
5484 if (DVR->isDbgAssign())
5492 if (DVRClones.
empty())
5506 assert(InsertPos.getHeadBit());
5508 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5532 if (
I ==
nullptr)
continue;
5547 auto getOptionalSinkBlockForInst =
5548 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
5550 return std::nullopt;
5554 unsigned NumUsers = 0;
5556 for (
Use &U :
I->uses()) {
5561 return std::nullopt;
5566 if (
PHINode *PN = dyn_cast<PHINode>(UserInst))
5567 UserBB = PN->getIncomingBlock(U);
5571 if (UserParent && UserParent != UserBB)
5572 return std::nullopt;
5573 UserParent = UserBB;
5577 if (NumUsers == 0) {
5581 return std::nullopt;
5593 return std::nullopt;
5603 return std::nullopt;
5608 auto OptBB = getOptionalSinkBlockForInst(
I);
5610 auto *UserParent = *OptBB;
5618 for (
Use &U :
I->operands())
5619 if (
Instruction *OpI = dyn_cast<Instruction>(U.get()))
5627 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5640 <<
" New = " << *Result <<
'\n');
5645 Result->setDebugLoc(Result->getDebugLoc().orElse(
I->getDebugLoc()));
5647 Result->copyMetadata(*
I, LLVMContext::MD_annotation);
5649 I->replaceAllUsesWith(Result);
5652 Result->takeName(
I);
5659 if (isa<PHINode>(Result) != isa<PHINode>(
I)) {
5661 if (isa<PHINode>(
I))
5667 Result->insertInto(InstParent, InsertPos);
5676 <<
" New = " << *
I <<
'\n');
5708 if (!
I->hasMetadataOtherThanDebugLoc())
5711 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5712 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5713 if (!MDScopeList || !Container.insert(MDScopeList).second)
5715 for (
const auto &
MDOperand : MDScopeList->operands())
5716 if (
auto *MDScope = dyn_cast<MDNode>(
MDOperand))
5717 Container.insert(MDScope);
5720 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5721 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5730 "llvm.experimental.noalias.scope.decl in use ?");
5733 "llvm.experimental.noalias.scope should refer to a single scope");
5735 if (
auto *MD = dyn_cast<MDNode>(
MDOperand))
5736 return !UsedAliasScopesAndLists.
contains(MD) ||
5737 !UsedNoAliasScopesAndLists.
contains(MD);
5761 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
5762 for (
PHINode &PN : Succ->phis())
5763 for (
Use &U : PN.incoming_values())
5764 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5774 HandleOnlyLiveSuccessor(BB,
nullptr);
5781 if (!Inst.use_empty() &&
5782 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5786 Inst.replaceAllUsesWith(
C);
5789 Inst.eraseFromParent();
5795 for (
Use &U : Inst.operands()) {
5796 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5799 auto *
C = cast<Constant>(U);
5800 Constant *&FoldRes = FoldedConstants[
C];
5806 <<
"\n Old = " << *
C
5807 <<
"\n New = " << *FoldRes <<
'\n');
5816 if (!Inst.isDebugOrPseudoInst()) {
5817 InstrsForInstructionWorklist.
push_back(&Inst);
5818 SeenAliasScopes.
analyse(&Inst);
5826 if (isa<UndefValue>(BI->getCondition())) {
5828 HandleOnlyLiveSuccessor(BB,
nullptr);
5831 if (
auto *
Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5832 bool CondVal =
Cond->getZExtValue();
5833 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5836 }
else if (
SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5837 if (isa<UndefValue>(SI->getCondition())) {
5839 HandleOnlyLiveSuccessor(BB,
nullptr);
5842 if (
auto *
Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5843 HandleOnlyLiveSuccessor(BB,
5844 SI->findCaseValue(
Cond)->getCaseSuccessor());
5854 if (LiveBlocks.
count(&BB))
5857 unsigned NumDeadInstInBB;
5861 NumDeadInst += NumDeadInstInBB;
5878 Inst->eraseFromParent();
5907 auto &
DL =
F.getDataLayout();
5909 !
F.hasFnAttribute(
"instcombine-no-verify-fixpoint");
5917 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
5925 bool MadeIRChange =
false;
5930 unsigned Iteration = 0;
5934 <<
" on " <<
F.getName()
5935 <<
" reached; stopping without verifying fixpoint\n");
5940 ++NumWorklistIterations;
5941 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
5942 <<
F.getName() <<
"\n");
5945 ORE, BFI, BPI, PSI,
DL, RPOT);
5948 MadeChangeInThisIteration |= IC.
run();
5949 if (!MadeChangeInThisIteration)
5952 MadeIRChange =
true;
5955 "Instruction Combining on " +
Twine(
F.getName()) +
5958 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5959 "'instcombine-no-verify-fixpoint' to suppress this error.");
5965 else if (Iteration == 2)
5967 else if (Iteration == 3)
5968 ++NumThreeIterations;
5970 ++NumFourOrMoreIterations;
5972 return MadeIRChange;
5980 OS, MapClassName2PassName);
5987char InstCombinePass::ID = 0;
5993 if (LRT.shouldSkip(&
ID))
6006 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6011 BFI, BPI, PSI, Options)) {
6013 LRT.update(&
ID,
false);
6019 LRT.update(&
ID,
true);
6046 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6047 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
6048 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
6049 auto &
TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
F);
6050 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6051 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6055 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6058 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6061 if (
auto *WrapperPass =
6062 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
6063 BPI = &WrapperPass->getBPI();
6076 "Combine redundant instructions",
false,
false)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
A private abstract base class describing the concept of an individual alias analysis implementation.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
size_t size() const
size - Get the array size.
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
LLVM_ABI void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to calling a function.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, OGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the gi...
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
LLVM_ABI int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
LLVM_ABI Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
Instruction * removeOne()
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
Instruction * popDeferred()
void zap()
Check that the worklist is empty and nuke the backing store for the map.
void reserve(size_t Size)
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
This class represents min/max intrinsics.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as applied to LLVM passes.
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI const fltSemantics & getFltSemantics() const
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast unsigned integer to floating point.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
const Use & getOperandUse(unsigned i) const
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
constexpr ScalarTy getFixedValue() const
constexpr bool isZero() const
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
const ParentTy * getParent() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
void stable_sort(R &&Range)
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
bool isModSet(const ModRefInfo MRI)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
@ Ref
The access may reference the value stored in memory.
@ ModRef
The access may reference and may modify the value stored in memory.
@ Mod
The access may modify the value stored in memory.
@ NoModRef
The access neither references nor modifies the value stored in memory.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Don't use information from its non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const