LLVM 21.0.0git
RISCVTargetTransformInfo.h
Go to the documentation of this file.
1//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file defines a TargetTransformInfo::Concept conforming object specific
10/// to the RISC-V target machine. It uses the target's detailed information to
11/// provide more precise answers to certain TTI queries, while letting the
12/// target independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
17#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
18
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>
26
27namespace llvm {
28
29class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
32
33 friend BaseT;
34
35 const RISCVSubtarget *ST;
36 const RISCVTargetLowering *TLI;
37
38 const RISCVSubtarget *getST() const { return ST; }
39 const RISCVTargetLowering *getTLI() const { return TLI; }
40
41 /// This function returns an estimate for VL to be used in VL based terms
42 /// of the cost model. For fixed length vectors, this is simply the
43 /// vector length. For scalable vectors, we return results consistent
44 /// with getVScaleForTuning under the assumption that clients are also
45 /// using that when comparing costs between scalar and vector representation.
46 /// This does unfortunately mean that we can both undershoot and overshot
47 /// the true cost significantly if getVScaleForTuning is wildly off for the
48 /// actual target hardware.
49 unsigned getEstimatedVLFor(VectorType *Ty);
50
51 /// This function calculates the costs for one or more RVV opcodes based
52 /// on the vtype and the cost kind.
53 /// \param Opcodes A list of opcodes of the RVV instruction to evaluate.
54 /// \param VT The MVT of vtype associated with the RVV instructions.
55 /// For widening/narrowing instructions where the result and source types
56 /// differ, it is important to check the spec to determine whether the vtype
57 /// refers to the result or source type.
58 /// \param CostKind The type of cost to compute.
59 InstructionCost getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
61
62 /// Return the cost of accessing a constant pool entry of the specified
63 /// type.
64 InstructionCost getConstantPoolLoadCost(Type *Ty,
66public:
67 explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
68 : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
69 TLI(ST->getTargetLowering()) {}
70
71 /// Return the cost of materializing an immediate for a value operand of
72 /// a store instruction.
75
78 InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
79 const APInt &Imm, Type *Ty,
81 Instruction *Inst = nullptr);
83 const APInt &Imm, Type *Ty,
85
86 /// \name EVL Support for predicated vectorization.
87 /// Whether the target supports the %evl parameter of VP intrinsic efficiently
88 /// in hardware, for the given opcode and type/alignment. (see LLVM Language
89 /// Reference - "Vector Predication Intrinsics",
90 /// https://llvm.org/docs/LangRef.html#vector-predication-intrinsics and
91 /// "IR-level VP intrinsics",
92 /// https://llvm.org/docs/Proposals/VectorPredication.html#ir-level-vp-intrinsics).
93 /// \param Opcode the opcode of the instruction checked for predicated version
94 /// support.
95 /// \param DataType the type of the instruction with the \p Opcode checked for
96 /// prediction support.
97 /// \param Alignment the alignment for memory access operation checked for
98 /// predicated version support.
99 bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
100 Align Alignment) const;
101
103
104 bool shouldExpandReduction(const IntrinsicInst *II) const;
105 bool supportsScalableVectors() const { return ST->hasVInstructions(); }
106 bool enableOrderedReductions() const { return true; }
107 bool enableScalableVectorization() const { return ST->hasVInstructions(); }
109 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
110 return ST->hasVInstructions() ? TailFoldingStyle::Data
112 }
113 std::optional<unsigned> getMaxVScale() const;
114 std::optional<unsigned> getVScaleForTuning() const;
115
117
118 unsigned getRegUsageForType(Type *Ty);
119
120 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
121
123 // Epilogue vectorization is usually unprofitable - tail folding or
124 // a smaller VF would have been better. This a blunt hammer - we
125 // should re-examine this once vectorization is better tuned.
126 return false;
127 }
128
129 InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
130 Align Alignment, unsigned AddressSpace,
132
134 const Value *Base,
136 Type *AccessTy,
138
142
145
147 return ST->useRVVForFixedLengthVectors() ? 16 : 0;
148 }
149
151 ArrayRef<int> Mask,
153 VectorType *SubTp,
154 ArrayRef<const Value *> Args = {},
155 const Instruction *CxtI = nullptr);
156
157 InstructionCost getScalarizationOverhead(VectorType *Ty,
158 const APInt &DemandedElts,
159 bool Insert, bool Extract,
161 ArrayRef<Value *> VL = {});
162
163 InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
165
166 InstructionCost getInterleavedMemoryOpCost(
167 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
168 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
169 bool UseMaskForCond = false, bool UseMaskForGaps = false);
170
171 InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
172 const Value *Ptr, bool VariableMask,
173 Align Alignment,
175 const Instruction *I);
176
177 InstructionCost getExpandCompressMemoryOpCost(unsigned Opcode, Type *Src,
178 bool VariableMask,
179 Align Alignment,
181 const Instruction *I = nullptr);
182
183 InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
184 const Value *Ptr, bool VariableMask,
185 Align Alignment,
187 const Instruction *I);
188
189 InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
190
191 InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
194 const Instruction *I = nullptr);
195
196 InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
197 FastMathFlags FMF,
199
200 InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
201 std::optional<FastMathFlags> FMF,
203
204 InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
205 Type *ResTy, VectorType *ValTy,
206 FastMathFlags FMF,
208
209 InstructionCost
210 getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
212 TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
213 const Instruction *I = nullptr);
214
215 InstructionCost getCmpSelInstrCost(
216 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
218 TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
219 TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
220 const Instruction *I = nullptr);
221
222 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
223 const Instruction *I = nullptr);
224
226 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
228 unsigned Index, Value *Op0, Value *Op1);
229
230 InstructionCost getArithmeticInstrCost(
231 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
232 TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
233 TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
234 ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
235
237 return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty));
238 }
239
240 bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
241 if (!ST->hasVInstructions())
242 return false;
243
244 EVT DataTypeVT = TLI->getValueType(DL, DataType);
245
246 // Only support fixed vectors if we know the minimum vector size.
247 if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
248 return false;
249
250 EVT ElemType = DataTypeVT.getScalarType();
251 if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
252 return false;
253
254 return TLI->isLegalElementTypeForRVV(ElemType);
255 }
256
257 bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
258 return isLegalMaskedLoadStore(DataType, Alignment);
259 }
260 bool isLegalMaskedStore(Type *DataType, Align Alignment) {
261 return isLegalMaskedLoadStore(DataType, Alignment);
262 }
263
264 bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) {
265 if (!ST->hasVInstructions())
266 return false;
267
268 EVT DataTypeVT = TLI->getValueType(DL, DataType);
269
270 // Only support fixed vectors if we know the minimum vector size.
271 if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
272 return false;
273
274 // We also need to check if the vector of address is valid.
275 EVT PointerTypeVT = EVT(TLI->getPointerTy(DL));
276 if (DataTypeVT.isScalableVector() &&
277 !TLI->isLegalElementTypeForRVV(PointerTypeVT))
278 return false;
279
280 EVT ElemType = DataTypeVT.getScalarType();
281 if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
282 return false;
283
284 return TLI->isLegalElementTypeForRVV(ElemType);
285 }
286
287 bool isLegalMaskedGather(Type *DataType, Align Alignment) {
288 return isLegalMaskedGatherScatter(DataType, Alignment);
289 }
290 bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
291 return isLegalMaskedGatherScatter(DataType, Alignment);
292 }
293
295 // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
296 return ST->is64Bit() && !ST->hasVInstructionsI64();
297 }
298
300 // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
301 return ST->is64Bit() && !ST->hasVInstructionsI64();
302 }
303
304 bool isLegalStridedLoadStore(Type *DataType, Align Alignment) {
305 EVT DataTypeVT = TLI->getValueType(DL, DataType);
306 return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
307 }
308
309 bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
310 Align Alignment, unsigned AddrSpace) {
311 return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
312 DL);
313 }
314
315 bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
316
317 bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);
318
320 return TLI->isVScaleKnownToBeAPowerOfTwo();
321 }
322
323 /// \returns How the target needs this vector-predicated operation to be
324 /// transformed.
328 if (!ST->hasVInstructions() ||
329 (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
330 cast<VectorType>(PI.getArgOperand(1)->getType())
331 ->getElementType()
332 ->getIntegerBitWidth() != 1))
335 }
336
338 ElementCount VF) const {
339 if (!VF.isScalable())
340 return true;
341
342 Type *Ty = RdxDesc.getRecurrenceType();
343 if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)))
344 return false;
345
346 // We can't promote f16/bf16 fadd reductions and scalable vectors can't be
347 // expanded.
348 // TODO: Promote f16/bf16 fmin/fmax reductions
349 if (Ty->isBFloatTy() || (Ty->isHalfTy() && !ST->hasVInstructionsF16()))
350 return false;
351
352 switch (RdxDesc.getRecurrenceKind()) {
353 case RecurKind::Add:
354 case RecurKind::FAdd:
355 case RecurKind::And:
356 case RecurKind::Or:
357 case RecurKind::Xor:
358 case RecurKind::SMin:
359 case RecurKind::SMax:
360 case RecurKind::UMin:
361 case RecurKind::UMax:
362 case RecurKind::FMin:
363 case RecurKind::FMax:
367 return true;
368 default:
369 return false;
370 }
371 }
372
374 // Don't interleave if the loop has been vectorized with scalable vectors.
375 if (VF.isScalable())
376 return 1;
377 // If the loop will not be vectorized, don't interleave the loop.
378 // Let regular unroll to unroll the loop.
379 return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
380 }
381
383
385 unsigned getNumberOfRegisters(unsigned ClassID) const {
386 switch (ClassID) {
388 // 31 = 32 GPR - x0 (zero register)
389 // FIXME: Should we exclude fixed registers like SP, TP or GP?
390 return 31;
392 if (ST->hasStdExtF())
393 return 32;
394 return 0;
396 // Although there are 32 vector registers, v0 is special in that it is the
397 // only register that can be used to hold a mask.
398 // FIXME: Should we conservatively return 31 as the number of usable
399 // vector registers?
400 return ST->hasVInstructions() ? 32 : 0;
401 }
402 llvm_unreachable("unknown register class");
403 }
404
406 ScalarEvolution *SE) const;
407
408 unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
409 if (Vector)
411 if (!Ty)
413
414 Type *ScalarTy = Ty->getScalarType();
415 if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhmin()) ||
416 (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
417 (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
419 }
420
422 }
423
424 const char *getRegisterClassName(unsigned ClassID) const {
425 switch (ClassID) {
427 return "RISCV::GPRRC";
429 return "RISCV::FPRRC";
431 return "RISCV::VRRC";
432 }
433 llvm_unreachable("unknown register class");
434 }
435
438
439 bool
441 bool &AllowPromotionWithoutCommonHeader);
442 std::optional<unsigned> getMinPageSize() const { return 4096; }
443 /// Return true if the (vector) instruction I will be lowered to an
444 /// instruction with a scalar splat operand for the given Operand number.
445 bool canSplatOperand(Instruction *I, int Operand) const;
446 /// Return true if a vector instruction will lower to a target instruction
447 /// able to splat the given operand.
448 bool canSplatOperand(unsigned Opcode, int Operand) const;
449
451 SmallVectorImpl<Use *> &Ops) const;
452
454 bool IsZeroCmp) const;
455};
456
457} // end namespace llvm
458
459#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
This file provides a helper that implements much of the TTI interface in terms of the target-independ...
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Returns the sub-type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with that Idx.
uint32_t Index
TargetTransformInfo::VPLegalization VPLegalization
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
uint64_t IntrinsicInst * II
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition: APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Base class which can be used to help build a TTI implementation.
Definition: BasicTTIImpl.h:80
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1286
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:322
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
Machine Value Type.
The optimization diagnostic interface.
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
const char * getRegisterClassName(unsigned ClassID) const
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr)
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2)
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment)
InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment)
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr)
TargetTransformInfo::VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
bool isLegalMaskedStore(Type *DataType, Align Alignment)
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getNumberOfRegisters(unsigned ClassID) const
bool isElementTypeLegalForScalableVector(Type *Ty) const
unsigned getMaxInterleaveFactor(ElementCount VF)
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
bool isLegalMaskedLoadStore(Type *DataType, Align Alignment)
bool canSplatOperand(Instruction *I, int Operand) const
Return true if the (vector) instruction I will be lowered to an instruction with a scalar splat opera...
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr)
bool enableScalableVectorization() const
bool preferEpilogueVectorization() const
bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment)
bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader)
See if I should be considered for address type promotion.
bool isVScaleKnownToBeAPowerOfTwo() const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
bool isLegalMaskedLoad(Type *DataType, Align Alignment)
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
std::optional< unsigned > getVScaleForTuning() const
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, FastMathFlags FMF, TTI::TargetCostKind CostKind)
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
std::optional< unsigned > getMaxVScale() const
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
InstructionCost getExpandCompressMemoryOpCost(unsigned Opcode, Type *Src, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, ArrayRef< Value * > VL={})
InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind)
TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth)
bool shouldExpandReduction(const IntrinsicInst *II) const
InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys)
bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace)
InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo, TTI::TargetCostKind CostKind)
Return the cost of materializing an immediate for a value operand of a store instruction.
bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment)
bool isLegalStridedLoadStore(Type *DataType, Align Alignment)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getRegUsageForType(Type *Ty)
bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment)
bool isLegalMaskedGather(Type *DataType, Align Alignment)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpdInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
std::optional< unsigned > getMinPageSize() const
bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
bool isLegalMaskedScatter(Type *DataType, Align Alignment)
unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
bool hasActiveVectorLength(unsigned Opcode, Type *DataType, Align Alignment) const
unsigned getMinVectorRegisterBitWidth() const
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
bool isLegalElementTypeForRVV(EVT ScalarTy) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace, const DataLayout &) const
Returns whether or not generating a interleaved load/store intrinsic for this type will be legal.
bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const
Return true if a stride load store of the given result type and alignment is legal.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
Definition: IVDescriptors.h:77
Type * getRecurrenceType() const
Returns the type of the recurrence.
RecurKind getRecurrenceKind() const
The main scalar evolution driver.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
const DataLayout & getDataLayout() const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TargetCostKind
The kind of cost model.
PopcntSupportKind
Flags indicating the kind of support for population count.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
CastContextHint
Represents a hint about the context in which a cast is used.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:153
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
Definition: Type.h:145
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition: Type.h:142
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:156
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ FAnyOf
Any_of reduction with select(fcmp(),x,y) where one of (x,y) is loop invariant, and both x and y are i...
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMax
FP max implemented in terms of select(cmp()).
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ FMin
FP min implemented in terms of select(cmp()).
@ Add
Sum of integers.
@ FAdd
Sum of floats.
@ IAnyOf
Any_of reduction with select(icmp(),x,y) where one of (x,y) is loop invariant, and both x and y are i...
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:390
bool isFixedLengthVector() const
Definition: ValueTypes.h:181
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:318
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition: ValueTypes.h:174
Returns options for expansion of memcmp. IsZeroCmp is true if this is the expansion of memcmp(p1, p2, s) == 0.
Describe known properties for a set of pointers.
Parameters that control the generic loop unrolling transformation.