46#include "llvm/IR/IntrinsicsAArch64.h"
47#include "llvm/IR/IntrinsicsAMDGPU.h"
48#include "llvm/IR/IntrinsicsARM.h"
49#include "llvm/IR/IntrinsicsHexagon.h"
79#define DEBUG_TYPE "instcombine"
83using namespace PatternMatch;
85STATISTIC(NumSimplified,
"Number of library calls simplified");
88 "instcombine-guard-widening-window",
90 cl::desc(
"How wide an instruction window to bypass looking for "
97 if (ITy->getBitWidth() < 32)
107 auto *Src =
MI->getRawSource();
108 while (isa<GetElementPtrInst>(Src)) {
109 if (!Src->hasOneUse())
111 Src = cast<Instruction>(Src)->getOperand(0);
113 return isa<AllocaInst>(Src) && Src->hasOneUse();
119 if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
120 MI->setDestAlignment(DstAlign);
126 if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
127 MI->setSourceAlignment(SrcAlign);
150 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(
MI->getLength());
151 if (!MemOpLength)
return nullptr;
158 assert(
Size &&
"0-sized memory transferring should be removed already.");
168 if (*CopyDstAlign <
Size || *CopySrcAlign <
Size)
178 Value *Src =
MI->getArgOperand(1);
179 Value *Dest =
MI->getArgOperand(0);
182 L->setAlignment(*CopySrcAlign);
183 L->setAAMetadata(AACopyMD);
184 MDNode *LoopMemParallelMD =
185 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
186 if (LoopMemParallelMD)
187 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
188 MDNode *AccessGroupMD =
MI->getMetadata(LLVMContext::MD_access_group);
190 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
196 if (LoopMemParallelMD)
197 S->
setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
199 S->
setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
202 if (
auto *MT = dyn_cast<MemTransferInst>(
MI)) {
204 L->setVolatile(MT->isVolatile());
207 if (
MI->isAtomic()) {
219 const Align KnownAlignment =
222 if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
223 MI->setDestAlignment(KnownAlignment);
239 if (isa<UndefValue>(
MI->getValue())) {
251 assert(Len &&
"0-sized memory setting should be removed already.");
252 const Align Alignment =
MI->getDestAlign().valueOrOne();
258 if (
MI->isAtomic() && Alignment < Len)
266 Constant *FillVal = ConstantInt::get(
272 DbgAssign->replaceVariableLocationOp(FillC, FillVal);
290 Value *LoadPtr =
II.getArgOperand(0);
291 const Align Alignment =
292 cast<ConstantInt>(
II.getArgOperand(1))->getAlignValue();
306 II.getDataLayout(), &
II, &
AC)) {
320 auto *ConstMask = dyn_cast<Constant>(
II.getArgOperand(3));
325 if (ConstMask->isNullValue())
329 if (ConstMask->isAllOnesValue()) {
330 Value *StorePtr =
II.getArgOperand(1);
331 Align Alignment = cast<ConstantInt>(
II.getArgOperand(2))->getAlignValue();
333 new StoreInst(
II.getArgOperand(0), StorePtr,
false, Alignment);
338 if (isa<ScalableVectorType>(ConstMask->getType()))
358 auto *ConstMask = dyn_cast<Constant>(
II.getArgOperand(2));
365 if (ConstMask->isAllOnesValue())
367 auto *VecTy = cast<VectorType>(
II.getType());
368 const Align Alignment =
369 cast<ConstantInt>(
II.getArgOperand(1))->getAlignValue();
371 Alignment,
"load.scalar");
386 auto *ConstMask = dyn_cast<Constant>(
II.getArgOperand(3));
391 if (ConstMask->isNullValue())
400 cast<ConstantInt>(
II.getArgOperand(2))->getAlignValue();
409 if (ConstMask->isAllOnesValue()) {
410 Align Alignment = cast<ConstantInt>(
II.getArgOperand(2))->getAlignValue();
411 VectorType *WideLoadTy = cast<VectorType>(
II.getArgOperand(1)->getType());
418 new StoreInst(Extract, SplatPtr,
false, Alignment);
423 if (isa<ScalableVectorType>(ConstMask->getType()))
449 auto *Arg =
II.getArgOperand(0);
450 auto *StrippedArg = Arg->stripPointerCasts();
451 auto *StrippedInvariantGroupsArg = StrippedArg;
452 while (
auto *
Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
453 if (
Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
454 Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
456 StrippedInvariantGroupsArg =
Intr->getArgOperand(0)->stripPointerCasts();
458 if (StrippedArg == StrippedInvariantGroupsArg)
461 Value *Result =
nullptr;
463 if (
II.getIntrinsicID() == Intrinsic::launder_invariant_group)
465 else if (
II.getIntrinsicID() == Intrinsic::strip_invariant_group)
469 "simplifyInvariantGroupIntrinsic only handles launder and strip");
470 if (Result->getType()->getPointerAddressSpace() !=
471 II.getType()->getPointerAddressSpace())
474 return cast<Instruction>(Result);
478 assert((
II.getIntrinsicID() == Intrinsic::cttz ||
479 II.getIntrinsicID() == Intrinsic::ctlz) &&
480 "Expected cttz or ctlz intrinsic");
481 bool IsTZ =
II.getIntrinsicID() == Intrinsic::cttz;
482 Value *Op0 =
II.getArgOperand(0);
483 Value *Op1 =
II.getArgOperand(1);
494 if (
II.getType()->isIntOrIntVectorTy(1)) {
507 II.dropUBImplyingAttrsAndMetadata();
554 return BinaryOperator::CreateAdd(ConstCttz,
X);
562 return BinaryOperator::CreateSub(ConstCttz,
X);
568 ConstantInt::get(
II.getType(),
II.getType()->getScalarSizeInBits());
569 return BinaryOperator::CreateSub(Width,
X);
577 return BinaryOperator::CreateAdd(ConstCtlz,
X);
585 return BinaryOperator::CreateSub(ConstCtlz,
X);
595 ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),
614 if (PossibleZeros == DefiniteZeros) {
615 auto *
C = ConstantInt::get(Op0->
getType(), DefiniteZeros);
630 if (
BitWidth != 1 && !
II.hasRetAttr(Attribute::Range) &&
631 !
II.getMetadata(LLVMContext::MD_range)) {
642 assert(
II.getIntrinsicID() == Intrinsic::ctpop &&
643 "Expected ctpop intrinsic");
646 Value *Op0 =
II.getArgOperand(0);
692 if ((~Known.
Zero).isPowerOf2())
693 return BinaryOperator::CreateLShr(
694 Op0, ConstantInt::get(Ty, (~Known.
Zero).exactLogBase2()));
708 II.getRange().value_or(ConstantRange::getFull(
BitWidth));
720 if (
Range != OldRange) {
736 auto *
C = dyn_cast<Constant>(
II.getArgOperand(1));
740 auto *VecTy = cast<FixedVectorType>(
II.getType());
741 unsigned NumElts = VecTy->getNumElements();
744 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
749 for (
unsigned I = 0;
I < NumElts; ++
I) {
752 if (!COp || !isa<ConstantInt>(COp))
755 Indexes[
I] = cast<ConstantInt>(COp)->getLimitedValue();
758 if ((
unsigned)Indexes[
I] >= NumElts)
762 auto *V1 =
II.getArgOperand(0);
770 unsigned NumOperands) {
771 assert(
I.arg_size() >= NumOperands &&
"Not enough operands");
773 for (
unsigned i = 0; i < NumOperands; i++)
795 for (; BI != BE; ++BI) {
796 if (
auto *
I = dyn_cast<IntrinsicInst>(&*BI)) {
797 if (
I->isDebugOrPseudoInst() ||
820 return II.getIntrinsicID() == Intrinsic::vastart ||
821 (
II.getIntrinsicID() == Intrinsic::vacopy &&
822 I.getArgOperand(0) !=
II.getArgOperand(1));
828 assert(Call.arg_size() > 1 &&
"Need at least 2 args to swap");
829 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
830 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
831 Call.setArgOperand(0, Arg1);
832 Call.setArgOperand(1, Arg0);
851 Value *OperationResult =
nullptr;
874 if (
auto *Inst = dyn_cast<Instruction>(Result)) {
876 Inst->setHasNoSignedWrap();
878 Inst->setHasNoUnsignedWrap();
903 switch (
static_cast<unsigned>(Mask)) {
944 case ~fcZero & ~fcNan:
960 Value *Src0 =
II.getArgOperand(0);
961 Value *Src1 =
II.getArgOperand(1);
962 const ConstantInt *CMask = cast<ConstantInt>(Src1);
967 const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;
969 const bool IsStrict =
970 II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);
976 II.setArgOperand(1, ConstantInt::get(Src1->
getType(),
fneg(Mask)));
986 if ((OrderedMask ==
fcInf || OrderedInvertedMask ==
fcInf) &&
987 (IsOrdered || IsUnordered) && !IsStrict) {
995 if (OrderedInvertedMask ==
fcInf)
1005 (IsOrdered || IsUnordered) && !IsStrict) {
1019 if ((OrderedInvertedMask ==
fcPosInf || OrderedInvertedMask ==
fcNegInf) &&
1020 (IsOrdered || IsUnordered) && !IsStrict) {
1033 if (Mask ==
fcNan && !IsStrict) {
1065 if (!IsStrict && (IsOrdered || IsUnordered) &&
1110 return std::nullopt;
1122 return std::nullopt;
1134 return *Known0 == *Known1;
1142 assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
1143 MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
1144 "Expected a min or max intrinsic");
1147 Value *Op0 =
II->getArgOperand(0), *Op1 =
II->getArgOperand(1);
1149 const APInt *C0, *C1;
1155 bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
1156 auto *
Add = cast<BinaryOperator>(Op0);
1157 if ((IsSigned && !
Add->hasNoSignedWrap()) ||
1158 (!IsSigned && !
Add->hasNoUnsignedWrap()))
1165 IsSigned ? C1->
ssub_ov(*C0, Overflow) : C1->
usub_ov(*C0, Overflow);
1166 assert(!Overflow &&
"Expected simplify of min/max");
1170 Constant *NewMinMaxC = ConstantInt::get(
II->getType(), CDiff);
1172 return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax,
Add->getOperand(1))
1173 : BinaryOperator::CreateNUWAdd(NewMinMax,
Add->getOperand(1));
1184 const APInt *MinValue, *MaxValue;
1188 }
else if (
match(&MinMax1,
1197 if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
1200 unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
1214 if (
AddSub->getOpcode() == Instruction::Add)
1215 IntrinsicID = Intrinsic::sadd_sat;
1216 else if (
AddSub->getOpcode() == Instruction::Sub)
1217 IntrinsicID = Intrinsic::ssub_sat;
1240 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1242 const APInt *C0, *C1;
1247 switch (
II->getIntrinsicID()) {
1248 case Intrinsic::smax:
1252 case Intrinsic::smin:
1256 case Intrinsic::umax:
1260 case Intrinsic::umin:
1282 auto *
LHS = dyn_cast<MinMaxIntrinsic>(
II->getArgOperand(0));
1296 if (InnerMinMaxID != MinMaxID &&
1297 !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
1298 (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
1306 {LHS->getArgOperand(0), NewC});
1326 auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
1327 if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
1333 MinMaxID,
II->getType());
1342 auto *
LHS = dyn_cast<IntrinsicInst>(
II->getArgOperand(0));
1343 auto *
RHS = dyn_cast<IntrinsicInst>(
II->getArgOperand(1));
1345 if (!
LHS || !
RHS ||
LHS->getIntrinsicID() != MinMaxID ||
1346 RHS->getIntrinsicID() != MinMaxID ||
1356 Value *MinMaxOp =
nullptr;
1357 Value *ThirdOp =
nullptr;
1361 if (
D ==
A ||
C ==
A) {
1366 }
else if (
D ==
B ||
C ==
B) {
1375 if (
D ==
A ||
D ==
B) {
1380 }
else if (
C ==
A ||
C ==
B) {
1388 if (!MinMaxOp || !ThirdOp)
1402 !
II->getCalledFunction()->isSpeculatable())
1409 return isa<Constant>(Arg.get()) ||
1410 isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
1411 Arg.getOperandNo(), nullptr);
1420 return isa<ShuffleVectorInst>(V) && V->hasOneUse();
1426 Type *SrcTy =
X->getType();
1427 for (
Use &Arg :
II->args()) {
1431 else if (
match(&Arg,
1433 X->getType() == SrcTy)
1452 Value *NewIntrinsic =
1466 return match(V, m_OneUse(m_VecReverse(m_Value())));
1473 for (
Use &Arg :
II->args()) {
1475 Arg.getOperandNo(),
nullptr))
1490 II->getType(),
II->getIntrinsicID(), NewArgs, FPI);
1497template <Intrinsic::ID IntrID>
1500 static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
1501 "This helper only supports BSWAP and BITREVERSE intrinsics");
1507 isa<BinaryOperator>(V)) {
1508 Value *OldReorderX, *OldReorderY;
1537 case Intrinsic::smax:
1538 case Intrinsic::smin:
1539 case Intrinsic::umax:
1540 case Intrinsic::umin:
1541 case Intrinsic::maximum:
1542 case Intrinsic::minimum:
1543 case Intrinsic::maximumnum:
1544 case Intrinsic::minimumnum:
1545 case Intrinsic::maxnum:
1546 case Intrinsic::minnum:
1565 auto IID =
II->getIntrinsicID();
1571 auto *InvariantBinaryInst =
1573 if (isa<FPMathOperator>(InvariantBinaryInst))
1574 cast<Instruction>(InvariantBinaryInst)->copyFastMathFlags(
II);
1575 return InvariantBinaryInst;
1579 if (!CanReorderLanes)
1587 if (!isa<FixedVectorType>(Arg->
getType()) ||
1589 !cast<ShuffleVectorInst>(Arg)->isSingleSource())
1592 int Sz = Mask.size();
1594 for (
int Idx : Mask) {
1602 return UsedIndices.
all() ? V :
nullptr;
1609template <Intrinsic::ID IntrID>
1614 static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
1615 "This helper only supports cttz and ctlz intrinsics");
1623 unsigned BitWidth = I1->getType()->getScalarSizeInBits();
1630 Type *Ty = I1->getType();
1632 IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
1633 IntrID == Intrinsic::cttz
1634 ? ConstantInt::get(Ty, 1)
1636 cast<Constant>(I1),
DL);
1638 IntrID, Builder.
CreateOr(CtOp, NewConst),
1647 case Intrinsic::umax:
1648 case Intrinsic::umin:
1649 if (HasNUW && LOp == Instruction::Add)
1651 if (HasNUW && LOp == Instruction::Shl)
1654 case Intrinsic::smax:
1655 case Intrinsic::smin:
1656 return HasNSW && LOp == Instruction::Add;
1699 if (
A ==
D ||
B ==
C)
1709 cast<BinaryOperator>(Builder.
CreateBinOp(InnerOpcode,
A, NewIntrinsic));
1710 }
else if (
B ==
D) {
1713 cast<BinaryOperator>(Builder.
CreateBinOp(InnerOpcode, NewIntrinsic,
B));
1749 return visitCallBase(CI);
1753 if (
auto *
MI = dyn_cast<AnyMemIntrinsic>(
II)) {
1754 if (
auto NumBytes =
MI->getLengthInBytes()) {
1756 if (NumBytes->isZero())
1761 if (
MI->isAtomic() &&
1762 (NumBytes->isNegative() ||
1763 (NumBytes->getZExtValue() %
MI->getElementSizeInBytes() != 0))) {
1765 assert(
MI->getType()->isVoidTy() &&
1766 "non void atomic unordered mem intrinsic");
1772 if (
MI->isVolatile())
1777 if (MTI->getSource() == MTI->getDest())
1782 return isa<ConstantPointerNull>(
Ptr) &&
1785 cast<PointerType>(
Ptr->getType())->getAddressSpace());
1787 bool SrcIsUndefined =
false;
1790 if (
auto *MTI = dyn_cast<AnyMemTransferInst>(
MI)) {
1793 SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
1794 }
else if (
auto *MSI = dyn_cast<AnyMemSetInst>(
MI)) {
1800 if (SrcIsUndefined || IsPointerUndefined(
MI->getRawDest())) {
1808 if (
auto *MMI = dyn_cast<AnyMemMoveInst>(
MI)) {
1809 if (
GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1810 if (GVSrc->isConstant()) {
1814 ? Intrinsic::memcpy_element_unordered_atomic
1815 : Intrinsic::memcpy;
1828 if (
auto *IIFVTy = dyn_cast<FixedVectorType>(
II->getType())) {
1829 auto VWidth = IIFVTy->getNumElements();
1830 APInt PoisonElts(VWidth, 0);
1839 if (
II->isCommutative()) {
1840 if (
auto Pair = matchSymmetricPair(
II->getOperand(0),
II->getOperand(1))) {
1854 if (CI.
use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
1861 case Intrinsic::objectsize: {
1864 &InsertedInstructions)) {
1865 for (
Instruction *Inserted : InsertedInstructions)
1871 case Intrinsic::abs: {
1872 Value *IIOperand =
II->getArgOperand(0);
1873 bool IntMinIsPoison = cast<Constant>(
II->getArgOperand(1))->isOneValue();
1878 if (cast<Instruction>(IIOperand)->
hasNoSignedWrap() || IntMinIsPoison)
1887 if (
match(IIOperand,
1889 m_Intrinsic<Intrinsic::abs>(
m_Value(
Y)))))) {
1891 cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;
1896 if (std::optional<bool> Known =
1922 return BinaryOperator::CreateAnd(
X, ConstantInt::get(
II->getType(), 1));
1926 case Intrinsic::umin: {
1927 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1930 assert(
II->getType()->getScalarSizeInBits() != 1 &&
1931 "Expected simplify of umin with max constant");
1937 if (
Value *FoldedCttz =
1938 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
1942 if (
Value *FoldedCtlz =
1943 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
1948 case Intrinsic::umax: {
1949 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1952 (I0->
hasOneUse() || I1->hasOneUse()) &&
X->getType() ==
Y->getType()) {
1985 if (IID == Intrinsic::umax) {
1996 case Intrinsic::smax:
1997 case Intrinsic::smin: {
1998 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
2001 (I0->
hasOneUse() || I1->hasOneUse()) &&
X->getType() ==
Y->getType()) {
2017 const APInt *MinC, *MaxC;
2018 auto CreateCanonicalClampForm = [&](
bool IsSigned) {
2019 auto MaxIID = IsSigned ? Intrinsic::smax : Intrinsic::umax;
2020 auto MinIID = IsSigned ? Intrinsic::smin : Intrinsic::umin;
2022 MaxIID,
X, ConstantInt::get(
X->getType(), *MaxC));
2025 MinIID, NewMax, ConstantInt::get(
X->getType(), *MinC)));
2027 if (IID == Intrinsic::smax &&
2031 return CreateCanonicalClampForm(
true);
2032 if (IID == Intrinsic::umax &&
2036 return CreateCanonicalClampForm(
false);
2040 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
2041 II->getType()->isIntOrIntVectorTy(1)) {
2042 return BinaryOperator::CreateAnd(I0, I1);
2047 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
2048 II->getType()->isIntOrIntVectorTy(1)) {
2049 return BinaryOperator::CreateOr(I0, I1);
2057 if (IID == Intrinsic::smin) {
2060 Value *Zero = ConstantInt::get(
X->getType(), 0);
2067 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
2094 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
2095 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
2097 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
2099 if (KnownSign == std::nullopt) {
2102 }
else if (*KnownSign ) {
2114 return BinaryOperator::CreateOr(I0,
X);
2152 ConstantInt::get(
II->getType(), *RHSC));
2162 if (I0->
hasOneUse() && !I1->hasOneUse())
2174 if (IID == Intrinsic::smin || IID == Intrinsic::umax)
2202 if (LHS_CR.
icmp(Pred, *RHSC))
2206 ConstantInt::get(
II->getType(), *RHSC));
2215 case Intrinsic::scmp: {
2216 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
2224 case Intrinsic::bitreverse: {
2225 Value *IIOperand =
II->getArgOperand(0);
2229 X->getType()->isIntOrIntVectorTy(1)) {
2230 Type *Ty =
II->getType();
2237 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand,
Builder))
2238 return crossLogicOpFold;
2242 case Intrinsic::bswap: {
2243 Value *IIOperand =
II->getArgOperand(0);
2255 cast<BinaryOperator>(IIOperand)->
getOpcode() == Instruction::Shl
2268 if (BW - LZ - TZ == 8) {
2269 assert(LZ != TZ &&
"active byte cannot be in the middle");
2271 return BinaryOperator::CreateNUWShl(
2272 IIOperand, ConstantInt::get(IIOperand->
getType(), LZ - TZ));
2274 return BinaryOperator::CreateExactLShr(
2275 IIOperand, ConstantInt::get(IIOperand->
getType(), TZ - LZ));
2280 unsigned C =
X->getType()->getScalarSizeInBits() - BW;
2281 Value *CV = ConstantInt::get(
X->getType(),
C);
2287 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand,
Builder)) {
2288 return crossLogicOpFold;
2297 case Intrinsic::masked_load:
2298 if (
Value *SimplifiedMaskedOp = simplifyMaskedLoad(*
II))
2301 case Intrinsic::masked_store:
2302 return simplifyMaskedStore(*
II);
2303 case Intrinsic::masked_gather:
2304 return simplifyMaskedGather(*
II);
2305 case Intrinsic::masked_scatter:
2306 return simplifyMaskedScatter(*
II);
2307 case Intrinsic::launder_invariant_group:
2308 case Intrinsic::strip_invariant_group:
2312 case Intrinsic::powi:
2313 if (
ConstantInt *Power = dyn_cast<ConstantInt>(
II->getArgOperand(1))) {
2316 if (Power->isMinusOne())
2318 II->getArgOperand(0),
II);
2320 if (Power->equalsInt(2))
2322 II->getArgOperand(0),
II);
2324 if (!Power->getValue()[0]) {
2339 case Intrinsic::cttz:
2340 case Intrinsic::ctlz:
2345 case Intrinsic::ctpop:
2350 case Intrinsic::fshl:
2351 case Intrinsic::fshr: {
2352 Value *Op0 =
II->getArgOperand(0), *Op1 =
II->getArgOperand(1);
2353 Type *Ty =
II->getType();
2363 if (ModuloC != ShAmtC)
2369 "Shift amount expected to be modulo bitwidth");
2374 if (IID == Intrinsic::fshr) {
2385 assert(IID == Intrinsic::fshl &&
2386 "All funnel shifts by simple constants should go left");
2391 return BinaryOperator::CreateShl(Op0, ShAmtC);
2396 return BinaryOperator::CreateLShr(Op1,
2420 Mod, IID == Intrinsic::fshl ? Intrinsic::fshr : Intrinsic::fshl, Ty);
2428 Value *Op2 =
II->getArgOperand(2);
2430 return BinaryOperator::CreateShl(Op0,
And);
2448 case Intrinsic::ptrmask: {
2454 Value *InnerPtr, *InnerMask;
2455 bool Changed =
false;
2459 if (
match(
II->getArgOperand(0),
2463 "Mask types must match");
2480 unsigned NewAlignmentLog =
2494 case Intrinsic::uadd_with_overflow:
2495 case Intrinsic::sadd_with_overflow: {
2503 const APInt *C0, *C1;
2504 Value *Arg0 =
II->getArgOperand(0);
2505 Value *Arg1 =
II->getArgOperand(1);
2506 bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2507 bool HasNWAdd = IsSigned
2513 IsSigned ? C1->
sadd_ov(*C0, Overflow) : C1->
uadd_ov(*C0, Overflow);
2517 IID,
X, ConstantInt::get(Arg1->
getType(), NewC)));
2522 case Intrinsic::umul_with_overflow:
2523 case Intrinsic::smul_with_overflow:
2524 case Intrinsic::usub_with_overflow:
2529 case Intrinsic::ssub_with_overflow: {
2534 Value *Arg0 =
II->getArgOperand(0);
2535 Value *Arg1 =
II->getArgOperand(1);
2552 case Intrinsic::uadd_sat:
2553 case Intrinsic::sadd_sat:
2554 case Intrinsic::usub_sat:
2555 case Intrinsic::ssub_sat: {
2557 Type *Ty = SI->getType();
2558 Value *Arg0 = SI->getLHS();
2559 Value *Arg1 = SI->getRHS();
2590 if (IID == Intrinsic::usub_sat &&
2601 C->isNotMinSignedValue()) {
2605 Intrinsic::sadd_sat, Arg0, NegVal));
2611 if (
auto *
Other = dyn_cast<IntrinsicInst>(Arg0)) {
2613 const APInt *Val, *Val2;
2616 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2617 if (
Other->getIntrinsicID() == IID &&
2625 NewVal = Val->
sadd_ov(*Val2, Overflow);
2638 IID,
X, ConstantInt::get(
II->getType(), NewVal)));
2644 case Intrinsic::minnum:
2645 case Intrinsic::maxnum:
2646 case Intrinsic::minimum:
2647 case Intrinsic::maximum: {
2648 Value *Arg0 =
II->getArgOperand(0);
2649 Value *Arg1 =
II->getArgOperand(1);
2658 case Intrinsic::maxnum:
2659 NewIID = Intrinsic::minnum;
2661 case Intrinsic::minnum:
2662 NewIID = Intrinsic::maxnum;
2664 case Intrinsic::maximum:
2665 NewIID = Intrinsic::minimum;
2667 case Intrinsic::minimum:
2668 NewIID = Intrinsic::maximum;
2674 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
2681 if (
auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2689 case Intrinsic::maxnum:
2692 case Intrinsic::minnum:
2695 case Intrinsic::maximum:
2698 case Intrinsic::minimum:
2708 IID,
X, ConstantFP::get(Arg0->
getType(), Res),
2717 X->getType() ==
Y->getType()) {
2729 auto IsMinMaxOrXNegX = [IID, &
X](
Value *Op0,
Value *Op1) {
2731 return Op0->hasOneUse() ||
2732 (IID != Intrinsic::minimum && IID != Intrinsic::minnum);
2736 if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
2738 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
2745 case Intrinsic::matrix_multiply: {
2757 Value *Op0 =
II->getOperand(0);
2758 Value *Op1 =
II->getOperand(1);
2759 Value *OpNotNeg, *NegatedOp;
2760 unsigned NegatedOpArg, OtherOpArg;
2777 Value *OtherOp =
II->getOperand(OtherOpArg);
2795 NewArgs[NegatedOpArg] = OpNotNeg;
2802 case Intrinsic::fmuladd: {
2808 II->getFastMathFlags());
2812 case Intrinsic::fma: {
2814 Value *Src0 =
II->getArgOperand(0);
2815 Value *Src1 =
II->getArgOperand(1);
2816 Value *Src2 =
II->getArgOperand(2);
2851 case Intrinsic::copysign: {
2852 Value *Mag =
II->getArgOperand(0), *Sign =
II->getArgOperand(1);
2855 if (*KnownSignBit) {
2895 case Intrinsic::fabs: {
2897 Value *Arg =
II->getArgOperand(0);
2907 if (Arg->
hasOneUse() ? (isa<Constant>(TVal) || isa<Constant>(FVal))
2908 : (isa<Constant>(TVal) && isa<Constant>(FVal))) {
2913 FastMathFlags FMF2 = cast<SelectInst>(Arg)->getFastMathFlags();
2915 SI->setFastMathFlags(FMF1 | FMF2);
2926 Value *Magnitude, *Sign;
2927 if (
match(
II->getArgOperand(0),
2937 case Intrinsic::ceil:
2938 case Intrinsic::floor:
2939 case Intrinsic::round:
2940 case Intrinsic::roundeven:
2941 case Intrinsic::nearbyint:
2942 case Intrinsic::rint:
2943 case Intrinsic::trunc: {
2952 case Intrinsic::cos:
2953 case Intrinsic::amdgcn_cos: {
2955 Value *Src =
II->getArgOperand(0);
2965 case Intrinsic::sin:
2966 case Intrinsic::amdgcn_sin: {
2975 case Intrinsic::ldexp: {
2988 Value *Src =
II->getArgOperand(0);
2989 Value *Exp =
II->getArgOperand(1);
2994 Exp->getType() == InnerExp->
getType()) {
2996 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();
3003 II->setArgOperand(1, NewExp);
3004 II->setFastMathFlags(InnerFlags);
3016 ConstantFP::get(
II->getType(), 1.0));
3023 ConstantFP::get(
II->getType(), 1.0));
3031 Value *SelectCond, *SelectLHS, *SelectRHS;
3032 if (
match(
II->getArgOperand(1),
3035 Value *NewLdexp =
nullptr;
3053 case Intrinsic::ptrauth_auth:
3054 case Intrinsic::ptrauth_resign: {
3057 bool NeedSign =
II->getIntrinsicID() == Intrinsic::ptrauth_resign;
3059 Value *Key =
II->getArgOperand(1);
3060 Value *Disc =
II->getArgOperand(2);
3064 Value *AuthKey =
nullptr, *AuthDisc =
nullptr, *BasePtr;
3065 if (
const auto *CI = dyn_cast<CallBase>(
Ptr)) {
3077 }
else if (
const auto *PtrToInt = dyn_cast<PtrToIntOperator>(
Ptr)) {
3080 const auto *CPA = dyn_cast<ConstantPtrAuth>(PtrToInt->getOperand(0));
3081 if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc,
DL))
3085 if (NeedSign && isa<ConstantInt>(
II->getArgOperand(4))) {
3086 auto *SignKey = cast<ConstantInt>(
II->getArgOperand(3));
3087 auto *SignDisc = cast<ConstantInt>(
II->getArgOperand(4));
3090 SignDisc, SignAddrDisc);
3102 if (AuthKey && NeedSign) {
3104 NewIntrin = Intrinsic::ptrauth_resign;
3105 }
else if (AuthKey) {
3107 NewIntrin = Intrinsic::ptrauth_auth;
3108 }
else if (NeedSign) {
3110 NewIntrin = Intrinsic::ptrauth_sign;
3133 case Intrinsic::arm_neon_vtbl1:
3134 case Intrinsic::aarch64_neon_tbl1:
3139 case Intrinsic::arm_neon_vmulls:
3140 case Intrinsic::arm_neon_vmullu:
3141 case Intrinsic::aarch64_neon_smull:
3142 case Intrinsic::aarch64_neon_umull: {
3143 Value *Arg0 =
II->getArgOperand(0);
3144 Value *Arg1 =
II->getArgOperand(1);
3147 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3152 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3153 IID == Intrinsic::aarch64_neon_umull);
3155 if (
Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3156 if (
Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3167 if (
Constant *CV1 = dyn_cast<Constant>(Arg1))
3169 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3176 case Intrinsic::arm_neon_aesd:
3177 case Intrinsic::arm_neon_aese:
3178 case Intrinsic::aarch64_crypto_aesd:
3179 case Intrinsic::aarch64_crypto_aese:
3180 case Intrinsic::aarch64_sve_aesd:
3181 case Intrinsic::aarch64_sve_aese: {
3182 Value *DataArg =
II->getArgOperand(0);
3183 Value *KeyArg =
II->getArgOperand(1);
3199 case Intrinsic::hexagon_V6_vandvrt:
3200 case Intrinsic::hexagon_V6_vandvrt_128B: {
3202 if (
auto Op0 = dyn_cast<IntrinsicInst>(
II->getArgOperand(0))) {
3204 if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
3205 ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
3207 Value *Bytes = Op0->getArgOperand(1), *Mask =
II->getArgOperand(1);
3212 if ((
C & 0xFF) && (
C & 0xFF00) && (
C & 0xFF0000) && (
C & 0xFF000000))
3217 case Intrinsic::stackrestore: {
3218 enum class ClassifyResult {
3222 CallWithSideEffects,
3225 if (isa<AllocaInst>(
I))
3226 return ClassifyResult::Alloca;
3228 if (
auto *CI = dyn_cast<CallInst>(
I)) {
3229 if (
auto *
II = dyn_cast<IntrinsicInst>(CI)) {
3230 if (
II->getIntrinsicID() == Intrinsic::stackrestore)
3231 return ClassifyResult::StackRestore;
3233 if (
II->mayHaveSideEffects())
3234 return ClassifyResult::CallWithSideEffects;
3237 return ClassifyResult::CallWithSideEffects;
3241 return ClassifyResult::None;
3247 if (
IntrinsicInst *SS = dyn_cast<IntrinsicInst>(
II->getArgOperand(0))) {
3248 if (SS->getIntrinsicID() == Intrinsic::stacksave &&
3249 SS->getParent() ==
II->getParent()) {
3251 bool CannotRemove =
false;
3252 for (++BI; &*BI !=
II; ++BI) {
3253 switch (Classify(&*BI)) {
3254 case ClassifyResult::None:
3258 case ClassifyResult::StackRestore:
3261 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
3262 CannotRemove =
true;
3265 case ClassifyResult::Alloca:
3266 case ClassifyResult::CallWithSideEffects:
3269 CannotRemove =
true;
3285 bool CannotRemove =
false;
3286 for (++BI; &*BI != TI; ++BI) {
3287 switch (Classify(&*BI)) {
3288 case ClassifyResult::None:
3292 case ClassifyResult::StackRestore:
3296 case ClassifyResult::Alloca:
3297 case ClassifyResult::CallWithSideEffects:
3301 CannotRemove =
true;
3311 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3315 case Intrinsic::lifetime_end:
3318 if (
II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3319 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
3320 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3324 return I.getIntrinsicID() == Intrinsic::lifetime_start;
3328 case Intrinsic::assume: {
3329 Value *IIOperand =
II->getArgOperand(0);
3331 II->getOperandBundlesAsDefs(OpBundles);
3337 assert(isa<AssumeInst>(Assume));
3347 if (
match(Next, m_Intrinsic<Intrinsic::assume>(
m_Specific(IIOperand))))
3348 return RemoveConditionFromAssume(Next);
3354 Value *AssumeIntrinsic =
II->getCalledOperand();
3376 LHS->getOpcode() == Instruction::Load &&
3382 return RemoveConditionFromAssume(
II);
3392 for (
unsigned Idx = 0;
Idx <
II->getNumOperandBundles();
Idx++) {
3394 if (OBU.
getTagName() ==
"separate_storage") {
3396 auto MaybeSimplifyHint = [&](
const Use &U) {
3397 Value *Hint = U.get();
3404 MaybeSimplifyHint(OBU.
Inputs[0]);
3405 MaybeSimplifyHint(OBU.
Inputs[1]);
3417 A->getType()->isPointerTy()) {
3423 return RemoveConditionFromAssume(
II);
3451 if (
auto *Replacement =
3454 Replacement->insertAfter(
II->getIterator());
3457 return RemoveConditionFromAssume(
II);
3464 for (
unsigned Idx = 0;
Idx <
II->getNumOperandBundles();
Idx++) {
3465 auto &BOI =
II->bundle_op_info_begin()[
Idx];
3468 if (BOI.End - BOI.Begin > 2)
3479 if (BOI.End - BOI.Begin > 0) {
3486 if (BOI.End - BOI.Begin > 0)
3487 II->op_begin()[BOI.Begin].set(CanonRK.
WasOn);
3488 if (BOI.End - BOI.Begin > 1)
3489 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
3515 case Intrinsic::experimental_guard: {
3526 Value *NextCond =
nullptr;
3528 m_Intrinsic<Intrinsic::experimental_guard>(
m_Value(NextCond)))) {
3529 Value *CurrCond =
II->getArgOperand(0);
3533 if (CurrCond != NextCond) {
3535 while (MoveI != NextInst) {
3547 case Intrinsic::vector_insert: {
3548 Value *Vec =
II->getArgOperand(0);
3549 Value *SubVec =
II->getArgOperand(1);
3551 auto *DstTy = dyn_cast<FixedVectorType>(
II->getType());
3552 auto *VecTy = dyn_cast<FixedVectorType>(Vec->
getType());
3553 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->
getType());
3557 if (DstTy && VecTy && SubVecTy) {
3558 unsigned DstNumElts = DstTy->getNumElements();
3559 unsigned VecNumElts = VecTy->getNumElements();
3560 unsigned SubVecNumElts = SubVecTy->getNumElements();
3561 unsigned IdxN = cast<ConstantInt>(
Idx)->getZExtValue();
3564 if (VecNumElts == SubVecNumElts)
3573 for (i = 0; i != SubVecNumElts; ++i)
3575 for (; i != VecNumElts; ++i)
3581 for (
unsigned i = 0; i != IdxN; ++i)
3583 for (
unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
3585 for (
unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
3593 case Intrinsic::vector_extract: {
3594 Value *Vec =
II->getArgOperand(0);
3597 Type *ReturnType =
II->getType();
3600 unsigned ExtractIdx = cast<ConstantInt>(
Idx)->getZExtValue();
3601 Value *InsertTuple, *InsertIdx, *InsertValue;
3602 if (
match(Vec, m_Intrinsic<Intrinsic::vector_insert>(
m_Value(InsertTuple),
3605 InsertValue->
getType() == ReturnType) {
3606 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
3610 if (ExtractIdx == Index)
3621 auto *DstTy = dyn_cast<VectorType>(ReturnType);
3622 auto *VecTy = dyn_cast<VectorType>(Vec->
getType());
3624 if (DstTy && VecTy) {
3625 auto DstEltCnt = DstTy->getElementCount();
3626 auto VecEltCnt = VecTy->getElementCount();
3627 unsigned IdxN = cast<ConstantInt>(
Idx)->getZExtValue();
3630 if (DstEltCnt == VecTy->getElementCount()) {
3637 if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
3641 for (
unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
3642 Mask.push_back(IdxN + i);
3649 case Intrinsic::experimental_vp_reverse: {
3651 Value *Vec =
II->getArgOperand(0);
3652 Value *Mask =
II->getArgOperand(1);
3655 Value *EVL =
II->getArgOperand(2);
3661 auto *OldUnOp = cast<UnaryOperator>(Vec);
3663 OldUnOp->getOpcode(),
X, OldUnOp, OldUnOp->getName(),
3669 case Intrinsic::vector_reduce_or:
3670 case Intrinsic::vector_reduce_and: {
3678 Value *Arg =
II->getArgOperand(0);
3688 if (
auto *FTy = dyn_cast<FixedVectorType>(Vect->
getType()))
3692 if (IID == Intrinsic::vector_reduce_and) {
3696 assert(IID == Intrinsic::vector_reduce_or &&
3697 "Expected or reduction.");
3708 case Intrinsic::vector_reduce_add: {
3709 if (IID == Intrinsic::vector_reduce_add) {
3716 Value *Arg =
II->getArgOperand(0);
3726 if (
auto *FTy = dyn_cast<FixedVectorType>(Vect->
getType()))
3734 cast<Instruction>(Arg)->
getOpcode() == Instruction::SExt)
3742 case Intrinsic::vector_reduce_xor: {
3743 if (IID == Intrinsic::vector_reduce_xor) {
3751 Value *Arg =
II->getArgOperand(0);
3761 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3773 case Intrinsic::vector_reduce_mul: {
3774 if (IID == Intrinsic::vector_reduce_mul) {
3781 Value *Arg =
II->getArgOperand(0);
3791 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3802 case Intrinsic::vector_reduce_umin:
3803 case Intrinsic::vector_reduce_umax: {
3804 if (IID == Intrinsic::vector_reduce_umin ||
3805 IID == Intrinsic::vector_reduce_umax) {
3812 Value *Arg =
II->getArgOperand(0);
3822 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3824 Value *Res = IID == Intrinsic::vector_reduce_umin
3836 case Intrinsic::vector_reduce_smin:
3837 case Intrinsic::vector_reduce_smax: {
3838 if (IID == Intrinsic::vector_reduce_smin ||
3839 IID == Intrinsic::vector_reduce_smax) {
3854 Value *Arg =
II->getArgOperand(0);
3864 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3868 ExtOpc = cast<CastInst>(Arg)->getOpcode();
3869 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
3870 (ExtOpc == Instruction::CastOps::ZExt))
3881 case Intrinsic::vector_reduce_fmax:
3882 case Intrinsic::vector_reduce_fmin:
3883 case Intrinsic::vector_reduce_fadd:
3884 case Intrinsic::vector_reduce_fmul: {
3885 bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
3886 IID != Intrinsic::vector_reduce_fmul) ||
3887 II->hasAllowReassoc();
3888 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
3889 IID == Intrinsic::vector_reduce_fmul)
3892 Value *Arg =
II->getArgOperand(ArgIdx);
3899 case Intrinsic::is_fpclass: {
3904 case Intrinsic::threadlocal_address: {
3913 case Intrinsic::frexp: {
3944 if (
auto *Sel = dyn_cast<SelectInst>(
Op))
3947 if (
auto *Phi = dyn_cast<PHINode>(
Op))
3963 return visitCallBase(*
II);
3968 auto *NFI = dyn_cast<FenceInst>(FI.
getNextNode());
3978 if (FI1SyncScope != FI2->getSyncScopeID() ||
3985 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
3988 if (
auto *PFI = dyn_cast_or_null<FenceInst>(FI.
getPrevNode()))
3989 if (isIdenticalOrStrongerFence(PFI, &FI))
3996 return visitCallBase(
II);
4001 return visitCallBase(CBI);
4020 InstCombineRAUW, InstCombineErase);
4021 if (
Value *With = Simplifier.optimizeCall(CI,
Builder)) {
4033 if (Underlying != TrampMem &&
4034 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4036 if (!isa<AllocaInst>(Underlying))
4044 if (
II->getIntrinsicID() == Intrinsic::init_trampoline) {
4048 InitTrampoline =
II;
4051 if (
II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4058 if (!InitTrampoline)
4062 if (InitTrampoline->
getOperand(0) != TrampMem)
4065 return InitTrampoline;
4077 if (
II->getIntrinsicID() == Intrinsic::init_trampoline &&
4078 II->getOperand(0) == TrampMem)
4090 Callee = Callee->stripPointerCasts();
4091 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4107 const auto *IPC = dyn_cast<IntToPtrInst>(Callee);
4108 if (!IPC || !IPC->isNoopCast(
DL))
4111 const auto *
II = dyn_cast<IntrinsicInst>(IPC->getOperand(0));
4116 if (IIID != Intrinsic::ptrauth_resign && IIID != Intrinsic::ptrauth_sign)
4120 std::optional<OperandBundleUse> PtrAuthBundleOrNone;
4122 for (
unsigned BI = 0, BE =
Call.getNumOperandBundles(); BI != BE; ++BI) {
4125 PtrAuthBundleOrNone = Bundle;
4130 if (!PtrAuthBundleOrNone)
4133 Value *NewCallee =
nullptr;
4137 case Intrinsic::ptrauth_resign: {
4139 if (
II->getOperand(3) != PtrAuthBundleOrNone->Inputs[0])
4142 if (
II->getOperand(4) != PtrAuthBundleOrNone->Inputs[1])
4147 if (
II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])
4150 Value *NewBundleOps[] = {
II->getOperand(1),
II->getOperand(2)};
4152 NewCallee =
II->getOperand(0);
4159 case Intrinsic::ptrauth_sign: {
4161 if (
II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])
4164 if (
II->getOperand(2) != PtrAuthBundleOrNone->Inputs[1])
4166 NewCallee =
II->getOperand(0);
4183 auto *CPA = dyn_cast<ConstantPtrAuth>(
Call.getCalledOperand());
4187 auto *CalleeF = dyn_cast<Function>(CPA->getPointer());
4197 auto *
Key = cast<ConstantInt>(PAB->Inputs[0]);
4201 if (!CPA->isKnownCompatibleWith(Key, Discriminator,
DL))
4210bool InstCombinerImpl::annotateAnyAllocSite(
CallBase &Call,
4216 bool Changed =
false;
4218 if (!
Call.getType()->isPointerTy())
4225 if (
Call.hasRetAttr(Attribute::NonNull)) {
4226 Changed = !
Call.hasRetAttr(Attribute::Dereferenceable);
4228 Call.getContext(),
Size->getLimitedValue()));
4230 Changed = !
Call.hasRetAttr(Attribute::DereferenceableOrNull);
4232 Call.getContext(),
Size->getLimitedValue()));
4241 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
4245 Align ExistingAlign =
Call.getRetAlign().valueOrOne();
4247 if (NewAlign > ExistingAlign) {
4259 bool Changed = annotateAnyAllocSite(Call, &
TLI);
4268 if (
V->getType()->isPointerTy()) {
4271 bool HasDereferenceable =
Call.getParamDereferenceableBytes(ArgNo) > 0;
4272 if (
Call.paramHasAttr(ArgNo, Attribute::NonNull) ||
4273 (HasDereferenceable &&
4275 V->getType()->getPointerAddressSpace()))) {
4276 if (
Value *Res = simplifyNonNullOperand(V, HasDereferenceable)) {
4288 assert(ArgNo ==
Call.arg_size() &&
"Call arguments not processed correctly.");
4290 if (!ArgNos.
empty()) {
4295 Call.setAttributes(AS);
4302 Function *CalleeF = dyn_cast<Function>(Callee);
4304 transformConstExprCastCall(Call))
4311 LLVM_DEBUG(
dbgs() <<
"Removing convergent attr from instr " << Call
4313 Call.setNotConvergent();
4335 if (isa<CallInst>(OldCall))
4340 cast<CallBase>(OldCall)->setCalledFunction(
4349 if ((isa<ConstantPointerNull>(Callee) &&
4351 isa<UndefValue>(Callee)) {
4354 if (!
Call.getType()->isVoidTy())
4357 if (
Call.isTerminator()) {
4368 return transformCallThroughTrampoline(Call, *
II);
4371 if (
Instruction *NewCall = foldPtrAuthIntrinsicCallee(Call))
4375 if (
Instruction *NewCall = foldPtrAuthConstantCallee(Call))
4378 if (isa<InlineAsm>(Callee) && !
Call.doesNotThrow()) {
4380 if (!
IA->canThrow()) {
4383 Call.setDoesNotThrow();
4391 if (
CallInst *CI = dyn_cast<CallInst>(&Call)) {
4398 if (!
Call.use_empty() && !
Call.isMustTailCall())
4399 if (
Value *ReturnedArg =
Call.getReturnedArgOperand()) {
4401 Type *RetArgTy = ReturnedArg->getType();
4409 if (
Call.getMetadata(LLVMContext::MD_callee_type) && !
Call.isIndirectCall()) {
4410 Call.setMetadata(LLVMContext::MD_callee_type,
nullptr);
4417 if (Bundle && !
Call.isIndirectCall()) {
4424 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));
4428 dbgs() <<
Call.getModule()->getName()
4429 <<
": warning: kcfi: " <<
Call.getCaller()->getName()
4430 <<
": call to " << CalleeF->
getName()
4431 <<
" using a mismatching function pointer type\n";
4442 switch (
Call.getIntrinsicID()) {
4443 case Intrinsic::experimental_gc_statepoint: {
4459 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
4465 if (
auto *PT = dyn_cast<PointerType>(GCR.
getType())) {
4469 if (isa<ConstantPointerNull>(DerivedPtr)) {
4498 LiveGcValues.
insert(BasePtr);
4499 LiveGcValues.
insert(DerivedPtr);
4501 std::optional<OperandBundleUse> Bundle =
4503 unsigned NumOfGCLives = LiveGcValues.
size();
4504 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
4508 std::vector<Value *> NewLiveGc;
4509 for (
Value *V : Bundle->Inputs) {
4513 if (LiveGcValues.
count(V)) {
4514 It->second = NewLiveGc.size();
4515 NewLiveGc.push_back(V);
4517 It->second = NumOfGCLives;
4523 assert(Val2Idx.
count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
4524 "Missed live gc for base pointer");
4526 GCR.
setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
4528 assert(Val2Idx.
count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
4529 "Missed live gc for derived pointer");
4531 GCR.
setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
4540 return Changed ? &
Call :
nullptr;
4546bool InstCombinerImpl::transformConstExprCastCall(
CallBase &Call) {
4548 dyn_cast<Function>(
Call.getCalledOperand()->stripPointerCasts());
4552 assert(!isa<CallBrInst>(Call) &&
4553 "CallBr's don't have a single point after a def to insert at");
4558 if (
Callee->isDeclaration())
4564 if (
Callee->hasFnAttribute(
"thunk"))
4570 if (
Callee->hasFnAttribute(Attribute::Naked))
4577 if (
Call.isMustTailCall())
4588 Type *NewRetTy = FT->getReturnType();
4591 if (OldRetTy != NewRetTy) {
4597 if (!
Caller->use_empty())
4612 if (!
Caller->use_empty()) {
4614 if (
auto *
II = dyn_cast<InvokeInst>(Caller))
4615 PhisNotSupportedBlock =
II->getNormalDest();
4616 if (PhisNotSupportedBlock)
4618 if (
PHINode *PN = dyn_cast<PHINode>(U))
4619 if (PN->getParent() == PhisNotSupportedBlock)
4624 unsigned NumActualArgs =
Call.arg_size();
4625 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4635 if (
Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4636 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
4639 auto AI =
Call.arg_begin();
4640 for (
unsigned i = 0, e = NumCommonArgs; i !=
e; ++i, ++AI) {
4641 Type *ParamTy = FT->getParamType(i);
4642 Type *ActTy = (*AI)->getType();
4654 if (
Call.isInAllocaArgument(i) ||
4662 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
4666 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4681 Args.reserve(NumActualArgs);
4682 ArgAttrs.
reserve(NumActualArgs);
4693 AI =
Call.arg_begin();
4694 for (
unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4695 Type *ParamTy = FT->getParamType(i);
4697 Value *NewArg = *AI;
4698 if ((*AI)->getType() != ParamTy)
4700 Args.push_back(NewArg);
4712 for (
unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4718 if (FT->getNumParams() < NumActualArgs) {
4720 if (FT->isVarArg()) {
4722 for (
unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4724 Value *NewArg = *AI;
4725 if (PTy != (*AI)->getType()) {
4731 Args.push_back(NewArg);
4744 assert((ArgAttrs.
size() == FT->getNumParams() || FT->isVarArg()) &&
4745 "missing argument attributes");
4750 Call.getOperandBundlesAsDefs(OpBundles);
4755 II->getUnwindDest(), Args, OpBundles);
4759 cast<CallInst>(Caller)->getTailCallKind());
4766 NewCall->
copyMetadata(*Caller, {LLVMContext::MD_prof});
4771 if (OldRetTy !=
NV->getType() && !
Caller->use_empty()) {
4772 assert(!
NV->getType()->isVoidTy());
4774 NC->setDebugLoc(
Caller->getDebugLoc());
4777 assert(OptInsertPt &&
"No place to insert cast");
4782 if (!
Caller->use_empty())
4784 else if (
Caller->hasValueHandle()) {
4785 if (OldRetTy ==
NV->getType())
4800InstCombinerImpl::transformCallThroughTrampoline(
CallBase &Call,
4807 if (
Attrs.hasAttrSomewhere(Attribute::Nest))
4815 unsigned NestArgNo = 0;
4816 Type *NestTy =
nullptr;
4821 E = NestFTy->param_end();
4822 I != E; ++NestArgNo, ++
I) {
4833 std::vector<Value*> NewArgs;
4834 std::vector<AttributeSet> NewArgAttrs;
4835 NewArgs.reserve(
Call.arg_size() + 1);
4836 NewArgAttrs.reserve(
Call.arg_size());
4843 auto I =
Call.arg_begin(), E =
Call.arg_end();
4845 if (ArgNo == NestArgNo) {
4848 if (NestVal->
getType() != NestTy)
4850 NewArgs.push_back(NestVal);
4851 NewArgAttrs.push_back(NestAttr);
4858 NewArgs.push_back(*
I);
4859 NewArgAttrs.push_back(
Attrs.getParamAttrs(ArgNo));
4870 std::vector<Type*> NewTypes;
4871 NewTypes.reserve(FTy->getNumParams()+1);
4878 E = FTy->param_end();
4881 if (ArgNo == NestArgNo)
4883 NewTypes.push_back(NestTy);
4889 NewTypes.push_back(*
I);
4902 Attrs.getRetAttrs(), NewArgAttrs);
4905 Call.getOperandBundlesAsDefs(OpBundles);
4910 II->getUnwindDest(), NewArgs, OpBundles);
4911 cast<InvokeInst>(NewCaller)->setCallingConv(
II->getCallingConv());
4912 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4913 }
else if (
CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4916 CBI->getIndirectDests(), NewArgs, OpBundles);
4917 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4918 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4921 cast<CallInst>(NewCaller)->setTailCallKind(
4922 cast<CallInst>(Call).getTailCallKind());
4923 cast<CallInst>(NewCaller)->setCallingConv(
4924 cast<CallInst>(Call).getCallingConv());
4925 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4936 Call.setCalledFunction(FTy, NestF);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Type * getPromotedType(Type *Ty)
Return the specified type promoted as it would be to pass though a va_arg area.
static Instruction * createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow)
Creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value...
static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)
static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function< bool(const IntrinsicInst &)> IsStart)
static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)
static Instruction * reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If this min/max has a matching min/max operand with a constant, try to push the constant operand into...
static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID)
Helper to match idempotent binary intrinsics, namely, intrinsics where f(f(x, y), y) == f(x,...
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ)
Return true if two values Op0 and Op1 are known to have the same sign.
static Instruction * moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC)
This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
static std::optional< bool > getKnownSign(Value *Op, const SimplifyQuery &SQ)
static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))
static bool hasUndefSource(AnyMemTransferInst *MI)
Recognize a memcpy/memmove from a trivially otherwise unused alloca.
static Instruction * factorizeMinMaxTree(IntrinsicInst *II)
Reduce a sequence of min/max intrinsics with a common operand.
static Value * simplifyNeonTbl1(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Convert a table lookup to shufflevector if the mask is constant.
static Instruction * foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If we have a clamp pattern like max (min X, 42), 41 – where the output can only be one of two possibl...
static Value * simplifyReductionOperand(Value *Arg, bool CanReorderLanes)
static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
static Value * foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
static std::optional< bool > getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)
static Value * foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder)
Fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp, ZeroUndef),...
static Value * foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC, IntrinsicInst *II)
Attempt to simplify value-accumulating recurrences of kind: umax.acc = phi i8 [ umax,...
static Instruction * foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)
static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)
static IntrinsicInst * findInitTrampoline(Value *Callee)
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static Value * reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ)
If this min/max has a constant operand and an operand that is a matching min/max with a constant oper...
static CallInst * canonicalizeConstantArg0ToArg1(CallInst &Call)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool hasNoSignedWrap(BinaryOperator &I)
static bool inputDenormalIsIEEE(DenormalMode Mode)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
This file implements the SmallBitVector class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
This class represents any memset intrinsic.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM_ABI void updateAffectedValues(AssumeInst *CI)
Update the cache of values being affected by this assumption (i.e.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
LLVM_ABI bool overlaps(const AttributeMask &AM) const
Return true if the builder has any attribute that's in the specified builder.
LLVM_ABI AttributeSet getFnAttrs() const
The function attributes are returned.
static LLVM_ABI AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
bool isEmpty() const
Return true if there are no attributes.
LLVM_ABI AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
LLVM_ABI bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
LLVM_ABI AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI AttributeSet removeAttributes(LLVMContext &C, const AttributeMask &AttrsToRemove) const
Remove the specified attributes from this set.
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
static LLVM_ABI Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
static LLVM_ABI Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
InstListType::reverse_iterator reverse_iterator
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI bool isSigned() const
Whether the intrinsic is signed or unsigned.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNSW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
unsigned arg_size() const
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
void setTailCallKind(TailCallKind TCK)
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getUnorderedPredicate() const
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI ConstantPtrAuth * get(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, Constant *AddrDisc)
Return a pointer signed with the specified parameters.
This class represents a range of values.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
This class represents an extension of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
void setNoSignedZeros(bool B=true)
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent function types.
Type::subtype_iterator param_iterator
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
bool isConvergent() const
Determine if the call is convergent.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool doesNotThrow() const
Determine if the function cannot unwind.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
unsigned getBasePtrIndex() const
The index into the associate statepoint's argument list which contains the base pointer of the pointe...
LLVM_ABI Value * getDerivedPtr() const
unsigned getDerivedPtrIndex() const
The index into the associate statepoint's argument list which contains the pointer whose relocation t...
Represents a gc.statepoint intrinsic call.
std::vector< const GCRelocateInst * > getGCRelocates() const
Get list of all gc reloactes linked to this statepoint May contain several relocations for the same b...
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
PointerType * getType() const
Global values are always pointers.
Common base class shared among various IRBuilders.
Value * CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateLdexp(Value *Src, Value *Exp, FMFSource FMFSource={}, const Twine &Name="")
Create call to the ldexp intrinsic.
LLVM_ABI Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
LLVM_ABI CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateAssumption(Value *Cond, ArrayRef< OperandBundleDef > OpBundles={})
Create an assume intrinsic call that allows the optimizer to assume that the provided condition will ...
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
ConstantInt * getTrue()
Get the constant value for i1 true.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
InvokeInst * CreateInvoke(FunctionType *Ty, Value *Callee, BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > OpBundles, const Twine &Name="")
Create an invoke instruction.
Value * CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM_ABI CallInst * CreateAddReduce(Value *Src)
Create a vector int add reduction intrinsic of the source vector.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Value * CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateCopySign(Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create call to the copysign intrinsic.
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
Value * CreateFNegFMF(Value *V, FMFSource FMFSource, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LLVM_ABI Value * CreateElementCount(Type *Ty, ElementCount EC)
Create an expression which evaluates to the number of elements in EC at runtime.
LLVM_ABI Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * SimplifyAnyMemSet(AnyMemSetInst *MI)
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitCallBrInst(CallBrInst &CBI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Value * foldReversedIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are reverses, try to pull the reverse after the intrinsic.
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * visitFenceInst(FenceInst &FI)
Instruction * foldShuffledIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the int...
Instruction * visitInvokeInst(InvokeInst &II)
Constant * getLosslessSignedTrunc(Constant *C, Type *TruncTy)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Instruction * visitVAEndInst(VAEndInst &I)
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * SimplifyAnyMemTransfer(AnyMemTransferInst *MI)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * visitCallInst(CallInst &CI)
CallInst simplification.
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
DominatorTree & getDominatorTree() const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
AssumptionCache & getAssumptionCache() const
OptimizationRemarkEmitter & ORE
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI std::optional< InstListType::iterator > getInsertionPointAfterDef()
Get the first insertion point at which the result of this instruction is defined.
LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY
Return true if the specified instruction is exactly identical to the current one.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
LibCallSimplifier - This class implements a collection of optimizations that replace well formed call...
An instruction for reading from memory.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
bool isSigned() const
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
bool isCommutative() const
Return true if the instruction is commutative.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
bool test(unsigned Idx) const
bool all() const
Returns true if all bits are set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Class to represent struct types.
static LLVM_ABI bool isCallingConvCCompatible(CallBase *CI)
Returns true if call site / callee has cdecl-compatible calling conventions.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
bool isStructTy() const
True if this is an instance of StructType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI const fltSemantics & getFltSemantics() const
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
LLVM_ABI unsigned getIntegerBitWidth() const
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This represents the llvm.va_end intrinsic.
static LLVM_ABI void ValueIsDeleted(Value *V)
static LLVM_ABI void ValueIsRAUWd(Value *Old, Value *New)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
static constexpr uint64_t MaximumAlignment
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
static LLVM_ABI void dropDroppableUse(Use &U)
Remove the droppable use U.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents an op.with.overflow intrinsic.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
LLVM_ABI AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match a integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
class_match< UnaryOperator > m_UnOp()
Match an arbitrary unary operation and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI cl::opt< bool > EnableKnowledgeRetention
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask)
Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be ...
LLVM_ABI RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)
Canonicalize the RetainedKnowledge RK.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
LLVM_ABI Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)
Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
LLVM_ABI FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool isModSet(const ModRefInfo MRI)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
auto find_if_not(R &&Range, UnaryPredicate P)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
LLVM_ABI AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...
LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
@ Mod
The access may modify the value stored in memory.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Don't use information from its non-constant operands.
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})
Return the size of the requested allocation.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, Return true if any of the elements of this predicate mask are known to be ...
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
@ IEEE
IEEE-754 denormal numbers preserved.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isNonZero() const
Returns true if this value is known to be non-zero.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A lightweight accessor for an operand bundle meant to be passed around by value.
StringRef getTagName() const
Return the tag of this operand bundle as a string.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Represent one information held inside an operand bundle of an llvm.assume.
Attribute::AttrKind AttrKind
SelectPatternFlavor Flavor
SimplifyQuery getWithInstruction(const Instruction *I) const