38   #define DEBUG_TYPE "instcombine"
52     if (!V->hasOneUse())
         return nullptr;
54     bool MadeChange = false;
58     Value *A = nullptr, *B = nullptr, *One = nullptr;
68     if (I && I->isLogicalShift() &&
77       if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
82       if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
83         I->setHasNoUnsignedWrap();
92     return MadeChange ? V : nullptr;
108      bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
109      Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
110      return Builder.CreateSelect(Cond, OtherOp, Neg);
116      bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
117      Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
118      return Builder.CreateSelect(Cond, Neg, OtherOp);
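         // Hedged illustration of the two select arms above, assuming the
         // usual select-of-constants operand matched by the elided code:
         //   mul (select %c, 1, -1), %x  -->  select %c, %x, (sub 0, %x)
         //   mul (select %c, -1, 1), %x  -->  select %c, (sub 0, %x), %x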
126      return Builder.CreateSelectFMF(Cond, OtherOp,
127                                     Builder.CreateFNegFMF(OtherOp, &I), &I);
134      return Builder.CreateSelectFMF(Cond, Builder.CreateFNegFMF(OtherOp, &I),
148    const bool HasNSW = Mul.hasNoSignedWrap();
149    const bool HasNUW = Mul.hasNoUnsignedWrap();
155      return Builder.CreateShl(X, Z, Mul.getName(), HasNUW, PropagateNSW);
168      FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
169      Value *Shl = Builder.CreateShl(FrX, Z, "mulshl", HasNUW, PropagateNSW);
170      return Builder.CreateAdd(Shl, FrX, Mul.getName(), HasNUW, PropagateNSW);
181      FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
182      Value *Shl = Builder.CreateShl(FrX, Z, "mulshl");
183      return Builder.CreateSub(Shl, FrX, Mul.getName());
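       // Hedged illustration of the three returns above, assuming the usual
       // "(1 << Z) plus/minus one" multiplier patterns matched by the elided
       // code:
       //   X * (1 << Z)        -->  X << Z
       //   X * ((1 << Z) + 1)  -->  (X << Z) + X
       //   X * ((1 << Z) - 1)  -->  (X << Z) - X
       // X is frozen in the last two because the rewrite uses it twice.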
190    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
193                                    SQ.getWithInstruction(&I)))
208    Type *Ty = I.getType();
209    const unsigned BitWidth = Ty->getScalarSizeInBits();
210    const bool HasNSW = I.hasNoSignedWrap();
211    const bool HasNUW = I.hasNoUnsignedWrap();
230        assert(Shl && "Constant folding of immediate constants failed");
233        if (HasNUW && Mul->hasNoUnsignedWrap())
249      if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
266          (*MulAP - 1).isPowerOf2() && *ShiftC == MulAP->logBase2()) {
272        BinOp = Builder.CreateLShr(NewOp, ConstantInt::get(Ty, *ShiftC), "",
275        auto *NewAdd = BinaryOperator::CreateAdd(NewOp, BinOp);
276        if (HasNSW && (HasNUW || OpBO->getOpcode() == Instruction::LShr ||
278          NewAdd->setHasNoSignedWrap(true);
280        NewAdd->setHasNoUnsignedWrap(HasNUW);
294                                            HasNSW && Op1C->isNotMinSignedValue()));
303    const APInt *NegPow2C;
307      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
309      if (ShiftAmt >= BitWidth - SrcWidth) {
312        return BinaryOperator::CreateShl(Z, ConstantInt::get(Ty, ShiftAmt));
335        (BOp0->getOpcode() == Instruction::Or || BOp0->hasNoUnsignedWrap());
337      auto *BO = BinaryOperator::CreateAdd(NewMul, NewC);
338      if (HasNUW && Op0NUW) {
341          NewMulBO->setHasNoUnsignedWrap();
342        BO->setHasNoUnsignedWrap();
351      return BinaryOperator::CreateMul(X, X);
356    if (I.hasNoSignedWrap() &&
361          I, Builder.CreateBinaryIntrinsic(Intrinsic::abs,
374      auto *NewMul = BinaryOperator::CreateMul(X, Y);
377        NewMul->setHasNoSignedWrap();
390      return BinaryOperator::CreateMul(NegOp0, X);
398    auto UDivCheck = [&C1](const APInt &C) { return C.urem(*C1).isZero(); };
399    auto SDivCheck = [&C1](const APInt &C) {
420    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
421                 Div->getOpcode() != Instruction::SDiv)) {
425      Value *Neg = dyn_castNegVal(Y);
428        (Div->getOpcode() == Instruction::UDiv ||
429         Div->getOpcode() == Instruction::SDiv)) {
439      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
444      XFreeze = Builder.CreateFreeze(X, X->getName() + ".fr");
445      Value *Rem = Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
447        return BinaryOperator::CreateSub(XFreeze, Rem);
448      return BinaryOperator::CreateSub(Rem, XFreeze);
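     // Hedged reading of the two subtractions above, assuming the elided code
     // matched a multiply of a udiv/sdiv result by (a negation of) its divisor:
     //   (X div Y) * Y   -->  X - (X rem Y)
     //   (X div Y) * -Y  -->  (X rem Y) - X
     // X is frozen because the rewrite uses it twice.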
457    if (Ty->isIntOrIntVectorTy(1) ||
460      return BinaryOperator::CreateAnd(Op0, Op1);
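     // A multiply of values that can only be 0 or 1 (i1 operands, or the
     // elided second condition) is equivalent to an 'and': the product is 1
     // only when both operands are 1.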
472        X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
473        (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
482        X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
483        (Op0->hasOneUse() || Op1->hasOneUse())) {
498        X->getType()->isIntOrIntVectorTy(1))
513        *C == C->getBitWidth() - 1) {
525        *C == C->getBitWidth() - 1) {
573    if (!HasNSW && willNotOverflowSignedMul(Op0, Op1, I)) {
575      I.setHasNoSignedWrap(true);
578    if (!HasNUW && willNotOverflowUnsignedMul(Op0, Op1, I, I.hasNoSignedWrap())) {
580      I.setHasNoUnsignedWrap(true);
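     // When value tracking proves the product cannot wrap, the missing nsw/nuw
     // flags are added in place. Worked example (illustration, not from the
     // listing): mul i8 (zext i3 %a to i8), (zext i3 %b to i8) is at most
     // 7 * 7 = 49, so both nuw and nsw are safe to set.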
588    assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
589           "Expected fmul or fdiv");
591    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
607        (Op0->hasOneUse() || Op1->hasOneUse())) {
608      Value *XY = Builder.CreateBinOpFMF(Opcode, X, Y, &I);
610          Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY, &I, I.getName());
623        Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
629    unsigned Opcode = I.getOpcode();
630    assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
631           "Unexpected opcode");
638      Constant *One = ConstantInt::get(Y->getType(), 1);
639      if (willNotOverflowSignedAdd(Y, One, I)) {
646    Value *Op0 = I.getOperand(0);
647    Value *Op1 = I.getOperand(1);
648    if (Opcode == Instruction::FMul && I.isOnlyUserOfAnyOperand() &&
653          Y->getType() == Z->getType()) {
658    if (Opcode == Instruction::FDiv && I.hasAllowReassoc() && I.hasNoNaNs()) {
665        willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
667      Instruction *NewPow = createPowiExpr(I, *this, Op1, Y, NegOne);
678        willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
680      auto *NewPow = createPowiExpr(I, *this, X, Y, NegOne);
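     // Hedged summary of the powi reassociation above: the fmul path builds
     // powi(X, Y + 1) from powi(X, Y) * X, guarded by willNotOverflowSignedAdd;
     // the fdiv paths build a powi with a decremented exponent (for example
     // powi(X, Y) / X  -->  powi(X, Y - 1)), guarded by willNotOverflowSignedSub
     // together with the reassoc and nnan flags checked above.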
712      return !R1.empty() && !R2.empty();
746    if (!X->hasAllowReassoc() || !X->hasAllowReciprocal() || !X->hasNoInfs())
753    if (BBx != BBr1 && BBx != BBr2)
762      return (I->getParent() != BBr1 || !I->hasAllowReassoc());
772      return (I->getParent() == BBr2 && I->hasAllowReassoc());
777    Value *Op0 = I.getOperand(0);
778    Value *Op1 = I.getOperand(1);
842      auto *NewFMul = Builder.CreateFMulFMF(X, Z, FMF);
853      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
863    if (I.hasNoSignedZeros() &&
867    if (I.hasNoSignedZeros() &&
874    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 && Op0->hasNUses(2)) {
893        Value *Y1 = Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), 1.0), &I);
894        Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, Y1, &I);
901    if (I.isOnlyUserOfAnyOperand()) {
905        auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
906        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
912        auto *XZ = Builder.CreateFMulFMF(X, Z, &I);
913        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, XZ, Y, &I);
921        Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
929        Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
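     // Hedged reading of the reassociation rewrites above (the pattern
     // matching is elided from this excerpt):
     //   sqrt(X) * sqrt(Y)      -->  sqrt(X * Y)
     //   pow(X, Y) * X          -->  pow(X, Y + 1)
     //   pow(X, Y) * pow(X, Z)  -->  pow(X, Y + Z)
     //   pow(X, Y) * pow(Z, Y)  -->  pow(X * Z, Y)
     //   exp(X) * exp(Y)        -->  exp(X + Y)
     //   exp2(X) * exp2(Y)      -->  exp2(X + Y)
     // each guarded by the fast-math flag checks visible nearby.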
955                                   I.getFastMathFlags(),
956                                   SQ.getWithInstruction(&I)))
981    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
993      Op0 = Builder.CreateFNegFMF(Op0, &I);
995                                        {I.getType()}, {Op1, Op0}, &I);
1006   if (I.hasNoNaNs() && I.hasNoSignedZeros()) {
1011         X->getType()->isIntOrIntVectorTy(1)) {
1013       SI->copyFastMathFlags(I.getFastMathFlags());
1017         X->getType()->isIntOrIntVectorTy(1)) {
1019       SI->copyFastMathFlags(I.getFastMathFlags());
1028   if (I.hasAllowReassoc())
1056   Value *Start = nullptr, *Step = nullptr;
1070       if (!Result->hasNoNaNs())
1071         Result->setHasNoInfs(false);
1076   if (I.hasAllowContract() &&
1080     auto *Sin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, &I);
1081     if (auto *Metadata = I.getMetadata(LLVMContext::MD_fpmath)) {
1082       Sin->setMetadata(LLVMContext::MD_fpmath, Metadata);
1119   Value *SelectCond = SI->getCondition();
1126   while (BBI != BBFront) {
1134     for (Use &Op : BBI->operands()) {
1138       } else if (Op == SelectCond) {
1148     if (&*BBI == SelectCond)
1149       SelectCond = nullptr;
1152     if (!SelectCond && !SI)
1163   Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
1190   assert((I.getOpcode() == Instruction::SDiv ||
1191           I.getOpcode() == Instruction::UDiv) &&
1192          "Expected integer divide");
1194   bool IsSigned = I.getOpcode() == Instruction::SDiv;
1195   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1196   Type *Ty = I.getType();
1207     bool HasNUW = Mul->hasNoUnsignedWrap() && Shl->hasNoUnsignedWrap();
1208     bool HasNSW = Mul->hasNoSignedWrap() && Shl->hasNoSignedWrap();
1211     if (!IsSigned && HasNUW)
1212       return Builder.CreateLShr(Y, Z, "", I.isExact());
1215     if (IsSigned && HasNSW && (Op0->hasOneUse() || Op1->hasOneUse())) {
1216       Value *Shl = Builder.CreateShl(ConstantInt::get(Ty, 1), Z);
1217       return Builder.CreateSDiv(Y, Shl, "", I.isExact());
1232         ((Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap()) ||
1233          (Shl0->hasNoUnsignedWrap() && Shl0->hasNoSignedWrap() &&
1234           Shl1->hasNoSignedWrap())))
1235       return Builder.CreateUDiv(X, Y, "", I.isExact());
1239     if (IsSigned && Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap() &&
1240         Shl1->hasNoUnsignedWrap())
1241       return Builder.CreateSDiv(X, Y, "", I.isExact());
1251     if (IsSigned ? (Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap())
1252                  : (Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap())) {
1253       Constant *One = ConstantInt::get(X->getType(), 1);
1256       Value *Dividend = Builder.CreateShl(
1257           One, Y, "shl.dividend",
1260           IsSigned ? (Shl0->hasNoUnsignedWrap() || Shl1->hasNoUnsignedWrap())
1261                    : Shl0->hasNoSignedWrap());
1262       return Builder.CreateLShr(Dividend, Z, "", I.isExact());
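     // Hedged reading of the shift-based divide folds above, assuming the
     // usual operand matches in the elided code:
     //   (X * Y) u/ (X << Z)  -->  Y u>> Z           (given nuw)
     //   (X * Y) s/ (X << Z)  -->  Y s/ (1 << Z)     (given nsw)
     //   (X << Z) / (Y << Z)  -->  X / Y             (given matching wrap flags)
     //   (X << Y) / (X << Z)  -->  (1 << Y) >> Z     (the "shl.dividend" case)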
1271   assert(I.isIntDivRem() && "Unexpected instruction");
1272   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1277     Type *Ty = I.getType();
1280     unsigned NumElts = VTy->getNumElements();
1281     for (unsigned i = 0; i != NumElts; ++i) {
1321   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1322   bool IsSigned = I.getOpcode() == Instruction::SDiv;
1323   Type *Ty = I.getType();
1336                                            ConstantInt::get(Ty, Product));
1344       if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
1346                                                  ConstantInt::get(Ty, Quotient));
1347         NewDiv->setIsExact(I.isExact());
1352       if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
1354                                                  ConstantInt::get(Ty, Quotient));
1356         Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
1357         Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
1370       if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
1372                                               ConstantInt::get(Ty, Quotient));
1373         BO->setIsExact(I.isExact());
1378       if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
1380                                                  ConstantInt::get(Ty, Quotient));
1382         Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
1383         Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
1396         return BinaryOperator::CreateNSWAdd(X, ConstantInt::get(Ty, Quotient));
1401       return BinaryOperator::CreateNUWAdd(X,
1402                                           ConstantInt::get(Ty, C1->udiv(*C2)));
1411   assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
1418     F1 = Builder.CreateFreeze(Op1, Op1->getName() + ".fr");
1420     Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
1443       return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
1445       return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);
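     // Hedged reading of the two shl returns above: with matching no-wrap
     // flags on the shift, (X << Y) / X folds to 1 << Y, keeping nsw for the
     // signed divide and nuw for the unsigned one.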
1451   if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
1460   if (!IsSigned && Op1->hasOneUse() &&
1465           Builder.CreateShl(ConstantInt::get(Ty, 1), Z, "", true), Y);
1481     if (!IsSigned && Mul->hasNoUnsignedWrap())
1482       NewDiv = BinaryOperator::CreateUDiv(X, Y);
1483     else if (IsSigned && Mul->hasNoSignedWrap())
1484       NewDiv = BinaryOperator::CreateSDiv(X, Y);
1488       NewDiv->setIsExact(I.isExact() && InnerDiv->isExact());
1502   const APInt *C1, *C2;
1503     if (IsSigned && OB0HasNSW) {
1505         return BinaryOperator::CreateSDiv(A, B);
1507     if (!IsSigned && OB0HasNUW) {
1509         return BinaryOperator::CreateUDiv(A, B);
1511         return BinaryOperator::CreateUDiv(A, B);
1517   if (auto *Val = CreateDivOrNull(Y, Z))
1521   if (auto *Val = CreateDivOrNull(X, Z))
1532     return reinterpret_cast<Value *>(-1);
1540     return IfFold([&]() {
1556     return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });
1562     if (AssumeNonZero || TI->hasNoUnsignedWrap())
1564       return IfFold([&]() {
1565         return Builder.CreateTrunc(LogX, Op->getType(), "",
1566                                    TI->hasNoUnsignedWrap());
1575     if (AssumeNonZero || BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap())
1577       return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });
1584     if (AssumeNonZero || PEO->isExact())
1586       return IfFold([&]() { return Builder.CreateSub(LogX, Y); });
1593     return IfFold([&]() { return LogX; });
1595     return IfFold([&]() { return LogY; });
1604     return IfFold([&]() {
1605       return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
1618     return IfFold([&]() {
1619       return Builder.CreateBinaryIntrinsic(MinMax->getIntrinsicID(), LogX,
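     // Hedged summary of the takeLog2 recursion above: the log passes through
     // zext and nuw trunc, a left shift adds its shift amount to the log, an
     // exact lshr subtracts it, and select/min/max distribute the log over
     // their operands; IfFold only materializes IR when folding is requested.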
1634   Type *Ty = I.getType();
1637       X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
1674                                   SQ.getWithInstruction(&I)))
1684   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1686   const APInt *C1, *C2;
1694                              X, ConstantInt::get(X->getType(), C2ShlC1));
1703   Type *Ty = I.getType();
1729   auto GetShiftableDenom = [&](Value *Denom) -> Value * {
1739     return Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Denom,
1745   if (auto *Res = GetShiftableDenom(Op1))
1747         I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
1754                                  SQ.getWithInstruction(&I)))
1764   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1765   Type *Ty = I.getType();
1781       return BinaryOperator::CreateExactAShr(Op0, C);
1787         return BinaryOperator::CreateExactAShr(Op0, ShAmt);
1793         Value *Ashr = Builder.CreateAShr(Op0, C, I.getName() + ".neg", true);
1814     Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
1822     Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
1833         Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));
1856     auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1857     BO->setIsExact(I.isExact());
1866       Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
1875     auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1876     BO->setIsExact(I.isExact());
1906   if (I.hasNoNaNs() &&
1911         Intrinsic::copysign, {C->getType()},
1920   if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1928       Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
1929   if (!RecipC || !RecipC->isNormalFP())
1949   if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1974   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1976   if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
1977       !I.hasAllowReciprocal())
1987   case Intrinsic::pow:
1988     Args.push_back(II->getArgOperand(0));
1989     Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
1991   case Intrinsic::powi: {
1999     Args.push_back(II->getArgOperand(0));
2000     Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
2001     Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
2002     Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
2005   case Intrinsic::exp:
2006   case Intrinsic::exp2:
2007     Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
2012   Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
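     // Each case above negates the exponent (or the operand, for exp/exp2) so
     // that the enclosing division can become a multiplication, for example
     //   Z / pow(X, Y)  -->  Z * pow(X, -Y)
     //   Z / exp(X)     -->  Z * exp(-X)
     // This is only done under the reassoc and arcp flags checked above.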
2021   if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
2023   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2025   if (!II || II->getIntrinsicID() != Intrinsic::sqrt || !II->hasOneUse() ||
2026       !II->hasAllowReassoc() || !II->hasAllowReciprocal())
2035   if (!DivOp->hasAllowReassoc() || !I.hasAllowReciprocal() ||
2036       !DivOp->hasOneUse())
2038   Value *SwapDiv = Builder.CreateFDivFMF(Z, Y, DivOp);
2040       Builder.CreateUnaryIntrinsic(II->getIntrinsicID(), SwapDiv, II);
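     // Hedged reading: with a sqrt-of-fdiv divisor, the division is rewritten
     // as a multiply by the sqrt of the swapped inner division, i.e.
     //   X / sqrt(Y / Z)  -->  X * sqrt(Z / Y)
     // which is why the operands of the inner fdiv are swapped above.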
2063   B.SetInsertPoint(X);
2069         B.CreateFDiv(ConstantFP::get(X->getType(), 1.0), SqrtOp));
2070   auto *R1FPMathMDNode = (*R1.begin())->getMetadata(LLVMContext::MD_fpmath);
2074         R1FPMathMDNode, I->getMetadata(LLVMContext::MD_fpmath));
2075     R1FMF &= I->getFastMathFlags();
2079   FDiv->setMetadata(LLVMContext::MD_fpmath, R1FPMathMDNode);
2080   FDiv->copyFastMathFlags(R1FMF);
2087   auto *R2FPMathMDNode = (*R2.begin())->getMetadata(LLVMContext::MD_fpmath);
2091         R2FPMathMDNode, I->getMetadata(LLVMContext::MD_fpmath));
2092     R2FMF &= I->getFastMathFlags();
2096   FSqrt->setMetadata(LLVMContext::MD_fpmath, R2FPMathMDNode);
2097   FSqrt->copyFastMathFlags(R2FMF);
2106   FMul->copyMetadata(*X);
2116                                   I.getFastMathFlags(),
2117                                   SQ.getWithInstruction(&I)))
2135   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2164   if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
2188   if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
2198     if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
2199                                        LibFunc_tanf, LibFunc_tanl)) {
2202       B.setFastMathFlags(I.getFastMathFlags());
2203       AttributeList Attrs =
2206                                         LibFunc_tanl, B, Attrs);
2208         Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
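       // Hedged reading of the libcall rewrite above:
       //   sin(X) / cos(X)  -->  tan(X)
       //   cos(X) / sin(X)  -->  1.0 / tan(X)   (the IsCot case, hence the
       //                                          extra fdiv of 1.0 by Res)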
2217   if (I.hasNoNaNs() && I.hasAllowReassoc() &&
2226   if (I.hasNoNaNs() && I.hasNoInfs() &&
2230         Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
2241   if (I.hasAllowReassoc() &&
2245         Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), -1.0), &I);
2246     Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, Op1, Y1, &I);
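     // Hedged reading: Y1 is Y + (-1.0), so the rebuilt intrinsic corresponds
     // to  pow(X, Y) / X  -->  pow(X, Y - 1)  with Op1 (the divisor) as the
     // pow base, under the reassoc flag checked above.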
2264   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *X = nullptr;
2266   bool ShiftByX = false;
2270                                bool &PreserveNSW) -> bool {
2271     const APInt *Tmp = nullptr;
2290     const APInt *Tmp = nullptr;
2302   bool Op0PreserveNSW = true, Op1PreserveNSW = true;
2303   if (MatchShiftOrMulXC(Op0, X, Y, Op0PreserveNSW) &&
2304       MatchShiftOrMulXC(Op1, X, Z, Op1PreserveNSW)) {
2306   } else if (MatchShiftCX(Op0, Y, X) && MatchShiftCX(Op1, Z, X)) {
2312   bool IsSRem = I.getOpcode() == Instruction::SRem;
2319   bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW;
2321   APInt RemYZ = IsSRem ? Y.srem(Z) : Y.urem(Z);
2325   if (RemYZ.isZero() && BO0NoWrap)
2331   auto CreateMulOrShift =
2333     Value *RemSimplification =
2334         ConstantInt::get(I.getType(), RemSimplificationC);
2335     return ShiftByX ? BinaryOperator::CreateShl(RemSimplification, X)
2336                     : BinaryOperator::CreateMul(X, RemSimplification);
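     // Hedged reading of simplifyIRemMulShl: the operands are matched as
     // X * Y and X * Z (or as Y << X and Z << X) with constant Y and Z, so
     // the remainder reduces to X * (Y rem Z), rebuilt by CreateMulOrShift as
     // a mul, or as (Y rem Z) << X when the common factor was the shift
     // amount, provided the no-wrap flags checked around here allow it.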
2342   bool BO1NoWrap = IsSRem ? BO1HasNSW : BO1HasNUW;
2346   if (RemYZ == Y && BO1NoWrap) {
2357   if (Y.uge(Z) && (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) {
2375   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2383   const APInt *Op1Int;
2385       (I.getOpcode() == Instruction::URem ||
2409                                   SQ.getWithInstruction(&I)))
2422   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2423   Type *Ty = I.getType();
2429     return BinaryOperator::CreateAnd(Op0, Add);
2434     Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
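     // The 'and' above is the power-of-two remainder fold (assuming the
     // elided Add is Op1 minus one):
     //   X urem Y  -->  X & (Y - 1)   when Y is known to be a power of two,
     // and the icmp implements  1 urem X  -->  zext(X != 1).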
2455   Value *FrozenOp0 = Op0;
2457     FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
2468   Value *FrozenOp0 = Op0;
2470     FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
2481                                  SQ.getWithInstruction(&I)))
2491   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2509     return BinaryOperator::CreateURem(Op0, Op1, I.getName());
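     // srem is rewritten as urem here when both operands are known to be
     // non-negative (their sign bits are zero), since the two operations then
     // agree; that guard lives in the elided lines just above.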
2517     bool hasNegative = false;
2518     bool hasMissing = false;
2519     for (unsigned i = 0; i != VWidth; ++i) {
2520       Constant *Elt = C->getAggregateElement(i);
2527         if (RHS->isNegative())
2531     if (hasNegative && !hasMissing) {
2533       for (unsigned i = 0; i != VWidth; ++i) {
2534         Elts[i] = C->getAggregateElement(i);
2536           if (RHS->isNegative())
2552                                   I.getFastMathFlags(),
2553                                   SQ.getWithInstruction(&I)))