#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
  // Features and tunings in this list do not change the observable behavior
  // of generated code, so mismatches between caller and callee are ignored
  // when deciding whether inlining is compatible (see areInlineCompatible
  // below).
  const FeatureBitset InlineFeatureIgnoreList = {
      X86::FeatureLAHFSAHF64,
      X86::FeatureSSEUnalignedMem,
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastHorizontalOps,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,
      X86::TuningFastDPWSSD,
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,
  };
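  // Illustrative sketch only (not part of this header): areInlineCompatible
  // masks these bits out of the caller's and callee's feature sets before
  // comparing them, roughly:
  //
  //   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  //   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  //   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
  //
  // Variable names follow the .cpp implementation; treat this as a
  // paraphrase rather than the exact code.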
public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override;

  unsigned getNumberOfRegisters(unsigned ClassID) const override;
  bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const override;
  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override;
  unsigned getRegisterClassForType(bool Vector, Type *Ty) const override;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const override;
  unsigned getMaxInterleaveFactor(ElementCount VF) const override;
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const override;
  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy,
                                 VectorType *SrcTy, ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = {},
                                 const Instruction *CxtI = nullptr) const override;
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr) const override;
  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override;
  /// Estimate the overhead of scalarizing an instruction.
  InstructionCost
  getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
                           bool Insert, bool Extract,
                           TTI::TargetCostKind CostKind,
                           bool ForPoisonSrc = true,
                           ArrayRef<Value *> VL = {}) const override;
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) const override;
  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;
  InstructionCost
  getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                        unsigned AddressSpace,
                        TTI::TargetCostKind CostKind) const override;
  /// Calculate the cost of Gather / Scatter operation.
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) const override;
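  // Illustrative sketch only: clients reach the hook above through the
  // public TargetTransformInfo wrapper rather than calling X86TTIImpl
  // directly. For example, costing a variable-mask gather load (`VecTy`,
  // `PtrVec`, and the alignment are made-up values):
  //
  //   InstructionCost C = TTI.getGatherScatterOpCost(
  //       Instruction::Load, VecTy, PtrVec, /*VariableMask=*/true,
  //       Align(4), TTI::TCK_RecipThroughput);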
  InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const TTI::PointersChainInfo &Info, Type *AccessTy,
                       TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
                            TTI::TargetCostKind CostKind) const override;

  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const override;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override;

  unsigned getAtomicMemIntrinsicMaxElementSize() const override;

  /// Get intrinsic cost based on arguments.
  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;

  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF) const;

  /// Try to calculate op costs for min/max reduction operations.
  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override;
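  // Illustrative sketch only: costing a reassociable float add-reduction
  // through the public wrapper (`VecTy` is a made-up <8 x float> type):
  //
  //   FastMathFlags FMF;
  //   FMF.setAllowReassoc();
  //   InstructionCost C = TTI.getArithmeticReductionCost(
  //       Instruction::FAdd, VecTy, FMF, TTI::TCK_RecipThroughput);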
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false) const override;
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false) const;

  /// Calculate the cost of materializing a 64-bit value.
  InstructionCost getIntImmCost(int64_t) const;

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const override;
  InstructionCost
  getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                      Type *Ty, TTI::TargetCostKind CostKind) const override;
  /// Return the cost of the scaling factor used in the addressing mode
  /// represented by AM for this target, for a load/store of the specified
  /// type.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const override;

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const override;
  bool canMacroFuseCmp() const override;
  bool isLegalMaskedLoad(Type *DataType, Align Alignment,
                         unsigned AddressSpace) const override;
  bool isLegalMaskedStore(Type *DataType, Align Alignment,
                          unsigned AddressSpace) const override;
  bool isLegalNTLoad(Type *DataType, Align Alignment) const override;
  bool isLegalNTStore(Type *DataType, Align Alignment) const override;
  bool isLegalBroadcastLoad(Type *ElementTy,
                            ElementCount NumElements) const override;
  bool forceScalarizeMaskedGather(VectorType *VTy,
                                  Align Alignment) const override;
  bool forceScalarizeMaskedScatter(VectorType *VTy,
                                   Align Alignment) const override;
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const;
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedCompressStore(Type *DataType,
                                  Align Alignment) const override;
  bool hasDivRemOp(Type *DataType, bool IsSigned) const override;
  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const override;
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override;
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Type) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return ST->getMaxInlineSizeThreshold();
  }
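  // Illustrative sketch only: a vectorizer-style client typically combines
  // the legality and scalarization hooks above through the public wrapper
  // before emitting a masked gather (`VecTy` and the alignment are made-up
  // values; the real logic lives in the vectorizers):
  //
  //   bool UseGather = TTI.isLegalMaskedGather(VecTy, Align(4)) &&
  //                    !TTI.forceScalarizeMaskedGather(VecTy, Align(4));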
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const override;
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const override;
  bool preferAlternateOpcodeVectorization() const override;
  bool prefersVectorizedAddressing() const override;
  bool supportsEfficientVectorElementLoadStore() const override;
  bool enableInterleavedAccessVectorization() const override;
  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;
  bool isVectorShiftByScalarCheap(Type *Ty) const override;
  InstructionCost getBranchMispredictPenalty() const override;
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override;

private:
  bool supportsGather() const;
  int getGatherOverhead() const;
  int getScatterOverhead() const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
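The hooks declared above are not normally called on X86TTIImpl directly; passes obtain them through the target-independent TargetTransformInfo wrapper, which dispatches to this implementation when the module targets x86. The following sketch is not part of this header — the pass name and the printing are made up — but every API call in it is the real public interface:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical new-pass-manager pass: prints the reciprocal-throughput cost
// the target's TTI implementation assigns to each instruction.
struct PrintInstCostPass : PassInfoMixin<PrintInstCostPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    // TargetIRAnalysis yields a TargetTransformInfo backed by X86TTIImpl
    // on x86 targets.
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    for (Instruction &I : instructions(F)) {
      InstructionCost Cost =
          TTI.getInstructionCost(&I, TargetTransformInfo::TCK_RecipThroughput);
      errs() << Cost << " : " << I << "\n";
    }
    return PreservedAnalyses::all();
  }
};

opt already ships a similar facility as the print<cost-model> pass, whose -cost-kind option selects throughput, latency, code-size, or size-latency costs.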