#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

// ...
    return SI.getNumCases();
  // ...
  virtual std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const {
    return std::make_pair(nullptr, -1);
  }
  // ...
  virtual bool isLoweredToCall(const Function *F) const {
    assert(F && "A concrete function must be provided to this routine.");

    // Intrinsics are handled by their own cost queries, never as calls.
    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sin" || Name == "sinf" || Name == "sinl" ||
        Name == "cos" || Name == "cosf" || Name == "cosl" ||
        Name == "tan" || Name == "tanf" || Name == "tanl" ||
        Name == "asin" || Name == "asinf" || Name == "asinl" ||
        Name == "acos" || Name == "acosf" || Name == "acosl" ||
        Name == "atan" || Name == "atanf" || Name == "atanl" ||
        Name == "atan2" || Name == "atan2f" || Name == "atan2l" ||
        Name == "sinh" || Name == "sinhf" || Name == "sinhl" ||
        Name == "cosh" || Name == "coshf" || Name == "coshl" ||
        Name == "tanh" || Name == "tanhf" || Name == "tanhl" ||
        Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" ||
        Name == "exp10" || Name == "exp10l" || Name == "exp10f")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }
  // ...
  virtual std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
    return std::nullopt;
  }

  virtual std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const {
    return std::nullopt;
  }

  virtual std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const {
    return std::nullopt;
  }
  // ...
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace,
                                     Instruction *I = nullptr,
                                     int64_t ScalableOffset = 0) const {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic
    // is taken from the implementation of LoopStrengthReduce.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }
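  // The addressing mode being queried has the documented form
  //   BaseGV + BaseOffset + HasBaseReg * BaseReg + Scale * ScaleReg
  // (plus ScalableOffset * vscale), so this default accepts exactly the
  // "reg" (Scale == 0) and "reg + reg" (Scale == 1) modes with no global
  // base and no immediate. For example, a query for "GV + 4 + reg + 8*reg"
  // (BaseGV set, BaseOffset == 4, HasBaseReg == true, Scale == 8) is
  // rejected by this conservative default.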
  // ...
    unsigned DataSize = DL.getTypeStoreSize(DataType);
  // ...
    unsigned DataSize = DL.getTypeStoreSize(DataType);
  // ...
                                  Align Alignment) const {
  // ...
                                  Align Alignment) const {
  // ...
                                  Align Alignment) const {
  // ...
                                        unsigned AddrSpace) const {
  // ...
                                Type *DataType) const {
  // ...
                                               bool HasBaseReg, int64_t Scale,
                                               unsigned AddrSpace) const {
  // ...
                              Scale, AddrSpace, nullptr,
  // ...
  virtual bool useAA() const { return false; }
  // ...
                                         unsigned ScalarOpdIdx) const {
  // ...
                              unsigned *Fast) const {
  // ...
                                           Type *Ty = nullptr) const {
  // ...
  virtual const char *getRegisterClassName(unsigned ClassID) const {
    switch (ClassID) {
    default:
      return "Generic::Unknown Register Class";
    case 0:
      return "Generic::ScalarRC";
    case 1:
      return "Generic::VectorRC";
    }
  }
  // ...
  virtual std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
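  // For scalable vectors, vscale is the runtime multiple of the minimum
  // element count: <vscale x 4 x i32> holds 4 * vscale i32 elements, and
  // std::nullopt means the target declares no upper bound on vscale.
  // Illustrative sketch of an override, assuming a target whose scalable
  // unit is 128 bits and whose widest register is 2048 bits (the AArch64
  // SVE convention):
  //
  //   std::optional<unsigned> getMaxVScale() const override {
  //     return 2048 / 128; // vscale can be at most 16
  //   }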
  // ...
  virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    return 0;
  }
  // ...
  virtual bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }
  // ...
  virtual std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    return std::nullopt;
  }
  // ...
  virtual std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    return std::nullopt;
  }
  // ...
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const {
  // ...
      unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
  // ...
    auto IsWidenableCondition = [](const Value *V) {
      if (const auto *II = dyn_cast<IntrinsicInst>(V))
        if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
          return true;
      return false;
    };
  // ...
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      // FIXME: Unlikely to be true for CodeSize.
      return TTI::TCC_Expensive;
    case Instruction::And:
    case Instruction::Or:
      if (any_of(Args, IsWidenableCondition))
        return TTI::TCC_Free;
      break;
  // ...
    if (Ty->getScalarType()->isFloatingPointTy())
    case Instruction::IntToPtr: {
      unsigned SrcSize = Src->getScalarSizeInBits();
      if (DL.isLegalInteger(SrcSize) &&
          SrcSize <= DL.getPointerTypeSizeInBits(Dst))
        return 0;
      break;
    }
    case Instruction::PtrToAddr: {
      unsigned DstSize = Dst->getScalarSizeInBits();
      assert(DstSize == DL.getAddressSizeInBits(Src));
      if (DL.isLegalInteger(DstSize))
        return 0;
      break;
    }
    case Instruction::PtrToInt: {
      unsigned DstSize = Dst->getScalarSizeInBits();
      if (DL.isLegalInteger(DstSize) &&
          DstSize >= DL.getPointerTypeSizeInBits(Src))
        return 0;
      break;
    }
    case Instruction::BitCast:
      if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return 0;
      break;
    case Instruction::Trunc: {
  // ...
                                             unsigned Index, const Value *Op0,
                                             const Value *Op1) const {
  // ...
      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
  // ...
                                             unsigned Index) const {
  // ...
                                             unsigned Index) const {
  // ...
                                const APInt &DemandedDstElts,
  // ...
    if (Opcode == Instruction::InsertValue &&
  // ...
                                           bool VariableMask, Align Alignment,
  // ...
      unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment,
  // ...
                                           bool VariableMask, Align Alignment,
  // ...
      bool UseMaskForCond, bool UseMaskForGaps) const {
  // ...
    switch (ICA.getID()) {
    default:
      break;
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::is_constant:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_begin_custom_abi:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_align:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_subfn_addr:
    case Intrinsic::threadlocal_address:
    case Intrinsic::experimental_widenable_condition:
    case Intrinsic::ssa_copy:
      // These intrinsics don't actually represent code after lowering.
      return 0;
    }
  // ...
                                             std::optional<FastMathFlags> FMF,
  // ...
      VectorType *Ty, std::optional<FastMathFlags> FMF,
  // ...
                                       bool CanCreate = true) const {
  // ...
                                  unsigned SrcAddrSpace, unsigned DestAddrSpace,
  // ...
      std::optional<uint32_t> AtomicElementSize) const {
    return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
                             : Type::getInt8Ty(Context);
  }
  // ...
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
  // ...
      std::optional<uint32_t> AtomicCpySize) const {
    unsigned OpSizeInBytes = AtomicCpySize.value_or(1);
    Type *OpType = Type::getIntNTy(Context, OpSizeInBytes * 8);
    for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
      OpsOut.push_back(OpType);
  }
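  // Worked example for the loop above: RemainingBytes == 3 with no atomic
  // element size yields three i8 operand types (OpSizeInBytes == 1), while
  // RemainingBytes == 8 with AtomicCpySize == 4 yields two i32 chunks.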
  // ...
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }
  // ...
                                        unsigned DefaultCallPenalty) const {
    return DefaultCallPenalty;
  }
  // ...
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }
  // ...
                                 unsigned AddrSpace) const {
  // ...
                                  unsigned AddrSpace) const {
  // ...
                                      unsigned ChainSizeInBytes,
  // ...
                                       unsigned ChainSizeInBytes,
  // ...
      unsigned MaxRequiredSize =
          VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getSignificantBits() - 1;
          isSigned |= signedElement;
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an integer constant element: conservatively report the full
          // element-type width.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
  // ...
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getSignificantBits() - 1;
  // ...
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
  // ...
      return Cast->getSrcTy()->getScalarSizeInBits();
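  // Worked example: for a ConstantInt with value 7, getSignificantBits()
  // returns 4 (three magnitude bits plus one sign bit), so the minimum
  // required element size is 4 - 1 == 3 bits; the value -8 likewise needs
  // 3 bits, but with isSigned set to true.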
  // ...
                                    int64_t MergeDistance) const {
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
  // ...
    assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
  // ...
    bool HasBaseReg = (BaseGV == nullptr);
  // ...
    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
  // ...
    Type *TargetType = nullptr;
  // ...
      TargetType = GTI.getIndexedType();
  // ...
      if (StructType *STy = GTI.getStructTypeOrNull()) {
  // ...
        assert(ConstIdx && "Unexpected GEP index");
  // ...
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
  // ...
        int64_t ElementSize =
            GTI.getSequentialElementStride(DL).getFixedValue();
  // ...
          Scale = ElementSize;
  // ...
      AccessType = TargetType;
  // ...
            Ptr->getType()->getPointerAddressSpace()))
  // ...
    for (const Value *V : Ptrs) {
  // ...
      if (Info.isSameBase() && V != Base) {
        if (GEP->hasAllConstantIndices())
  // ...
            {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
  // ...
            GEP->getSourceElementType(), GEP->getPointerOperand(), Indices,
  // ...
    auto *TargetTTI = static_cast<const T *>(this);
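    // The cast above is the CRTP dispatch at the heart of this class:
    // TargetTransformInfoImplCRTPBase<T> is instantiated with the concrete
    // target implementation as T, so shared cost logic reaches the target's
    // overrides through `static_cast<const T *>(this)` rather than a vtable.
    // Minimal sketch of the pattern (hypothetical names):
    //
    //   template <typename T> struct CostBase {
    //     int cost() const { return static_cast<const T *>(this)->perOp(); }
    //   };
    //   struct MyTarget : CostBase<MyTarget> {
    //     int perOp() const { return 2; } // bound at compile time
    //   };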
  // ...
      if (const Function *F = CB->getCalledFunction()) {
        if (!TargetTTI->isLoweredToCall(F))
  // ...
    Type *Ty = U->getType();
  // ...
    case Instruction::Call: {
  // ...
      return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
    }
    case Instruction::Br:
    case Instruction::Ret:
    case Instruction::PHI:
    case Instruction::Switch:
      return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
    case Instruction::Freeze:
  // ...
    case Instruction::ExtractValue:
    case Instruction::InsertValue:
      return TargetTTI->getInsertExtractValueCost(Opcode, CostKind);
    case Instruction::Alloca:
  // ...
    case Instruction::GetElementPtr: {
  // ...
      Type *AccessType = nullptr;
  // ...
      if (GEP->hasOneUser() && I)
        AccessType = I->user_back()->getAccessType();
      return TargetTTI->getGEPCost(GEP->getSourceElementType(),
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FNeg: {
  // ...
      if (Opcode != Instruction::FNeg)
  // ...
      return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
    case Instruction::IntToPtr:
    case Instruction::PtrToAddr:
    case Instruction::PtrToInt:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast:
    case Instruction::FPExt:
    case Instruction::SExt:
    case Instruction::ZExt:
    case Instruction::AddrSpaceCast: {
  // ...
      return TargetTTI->getCastInstrCost(
  // ...
    }
    case Instruction::Store: {
  // ...
      return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
  // ...
    }
    case Instruction::Load: {
  // ...
      Type *LoadType = U->getType();
  // ...
        LoadType = TI->getDestTy();
      return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
  // ...
                                        {TTI::OK_AnyValue, TTI::OP_None}, I);
    }
    case Instruction::Select: {
      const Value *Op0, *Op1;
  // ...
        return TargetTTI->getArithmeticInstrCost(
  // ...
      return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
  // ...
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
  // ...
      return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
  // ...
    }
    case Instruction::InsertElement: {
  // ...
        if (CI->getValue().getActiveBits() <= 32)
          Idx = CI->getZExtValue();
      return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx);
    }
    case Instruction::ShuffleVector: {
  // ...
      int NumSubElts, SubIndex;
  // ...
      if (all_of(Mask, [](int M) { return M < 0; }))
  // ...
      if (Shuffle->changesLength()) {
  // ...
        if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
  // ...
        if (Shuffle->isExtractSubvectorMask(SubIndex))
  // ...
                                           VecSrcTy, Mask, CostKind, SubIndex,
  // ...
        if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
          return TargetTTI->getShuffleCost(
  // ...
        int ReplicationFactor, VF;
        if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
  // ...
            DemandedDstElts.setBit(I.index());
  // ...
          return TargetTTI->getReplicationShuffleCost(
              VecSrcTy->getElementType(), ReplicationFactor, VF,
  // ...
        NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();
  // ...
        if (Shuffle->increasesLength()) {
          for (int &M : AdjustMask)
            M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;
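        // Worked example: widening 4 source elements to an 8-wide result.
        // A mask entry M == 5 selects element 1 of the second source; after
        // widening, the second source starts Mask.size() - NumSubElts == 4
        // slots later, so M is rewritten to 5 + 4 == 9 in the adjusted mask.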
        }
          return TargetTTI->getShuffleCost(
  // ...
        std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
        return ShuffleCost + TargetTTI->getShuffleCost(
  // ...
                                 ExtractMask, CostKind, 0, VecTy, {}, Shuffle);
      }
      if (Shuffle->isIdentity())
  // ...
      if (Shuffle->isReverse())
        return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, VecSrcTy, Mask,
  // ...
      if (Shuffle->isTranspose())
  // ...
      if (Shuffle->isZeroEltSplat())
  // ...
      if (Shuffle->isSingleSource())
  // ...
                                         VecSrcTy, Mask, CostKind, 0, nullptr,
  // ...
      if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
        return TargetTTI->getShuffleCost(
  // ...
      if (Shuffle->isSelect())
        return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, VecSrcTy, Mask,
  // ...
      if (Shuffle->isSplice(SubIndex))
        return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, VecSrcTy, Mask,
  // ...
    }
    case Instruction::ExtractElement: {
  // ...
        if (CI->getValue().getActiveBits() <= 32)
          Idx = CI->getZExtValue();
  // ...
      return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
    }
  // ...
    auto *TargetTTI = static_cast<const T *>(this);