//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

35 "force-hvx-float", cl::Hidden,
36 cl::desc("Enable auto-vectorization of floatint point types on v68."));
37
static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  if (!VecTy)
    return false;
  if (!ST.isTypeForHVX(VecTy))
    return false;
  if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
    return true;
  return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
}
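
// For illustration (a sketch, not exhaustive): an integer vector such as
// <32 x i32> qualifies whenever the subtarget reports it as an HVX type,
// while a floating-point vector such as <64 x half> additionally requires
// v69+ ops, or v68 ops together with the -force-hvx-float option.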

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) const {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) const {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = ClassID == 1;
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}
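
// For example, the scalar register class reports 32 bits; with HVX enabled
// the fixed-width vector class reports getMinVectorRegisterBitWidth(), e.g.
// 128 bytes * 8 = 1024 bits in 128-byte HVX mode. Scalable vectors are not
// supported, hence the zero scalable size.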

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength()*8 : 32;
}

ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}
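
// Worked example (assuming the common 128-byte HVX vector length): for
// 32-bit elements the minimum VF is (8 * 128) / 32 = 32 lanes; for 16-bit
// elements it is 64 lanes.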

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) const {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) const {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        getTypeLegalizationCost(ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost
HexagonTTIImpl::getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                          const SCEV *S,
                                          TTI::TargetCostKind CostKind) const {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                Align Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) const {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      const Align RegAlign(RegWidth / 8);
      if (Alignment > RegAlign)
        Alignment = RegAlign;
      unsigned AlignWidth = 8 * Alignment.value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment, Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}
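
// Worked example for the HVX path above (assuming 1024-bit HVX registers):
// loading a 2048-bit vector that is a whole number of registers costs
// 2048 / 1024 = 2. A width that is not a register multiple is instead
// assembled from scalar loads at a cost of 3 * NumLoads, where NumLoads is
// the vector width divided by the (clamped) alignment in bits.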

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) const {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost
HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy,
                               VectorType *SrcTy, ArrayRef<int> Mask,
                               TTI::TargetCostKind CostKind, int Index,
                               VectorType *SubTp, ArrayRef<const Value *> Args,
                               const Instruction *CxtI) const {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
}
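
// In other words: when every member of the interleave group is requested and
// no masking is involved, the group is priced as one plain wide access; e.g.
// an unmasked factor-2 group where both indices are used falls through to
// getMemoryOpCost on the combined vector type.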

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                   Op1Info, Op2Info, I);
}
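
// Example of the FCmp pricing above: an fcmp on a <32 x float> HVX type that
// legalizes in one step (LT.first == 1) is costed 1 + FloatFactor * 32 =
// 1 + 4 * 32 = 129, making FP comparisons markedly more expensive than
// integer ones.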

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  if (Ty->isVectorTy()) {
    if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}
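
// Example: an fadd on a <64 x half> HVX type that legalizes in one step is
// costed 1 + FloatFactor * 64 = 257, whereas a floating-point vector that is
// not an HVX type returns InstructionCost::getMax() to effectively forbid
// vectorization.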

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) const {
  auto isNonHVXFP = [this] (Type *Ty) {
    return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
  };
  if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
    return InstructionCost::getMax();

  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}
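
// Example of the FP-cast pricing above: sitofp from <32 x i32> to
// <32 x float> (assuming both legalize in one step) is costed
// max(1, 1) + FloatFactor * (0 + 32) = 1 + 4 * 32 = 129; SrcN is 0 because
// the integer source does not contribute the floating-point surcharge.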

InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index,
                                                   const Value *Op0,
                                                   const Value *Op1) const {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
                                     Index, Op0, Op1);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}
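
// Examples of the element-access pricing above: inserting an i32 at index 0
// costs 0, inserting an i32 at a non-zero index costs 2 (two rotations), and
// inserting an i16 at a non-zero index costs 2 + 2 = 4 because a non-32-bit
// element also pays the extract cost; any extractelement costs 2.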

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/,
                                        unsigned /*AddressSpace*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/,
                                       unsigned /*AddressSpace*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

InstructionCost
HexagonTTIImpl::getInstructionCost(const User *U,
                                   ArrayRef<const Value *> Operands,
                                   TTI::TargetCostKind CostKind) const {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TTI::TCC_Free;
  return BaseT::getInstructionCost(U, Operands, CostKind);
}
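
// Example of the fold above: for "%w = load i16, ptr %p" whose only use is
// "%x = sext i16 %w to i32", the extension is treated as TCC_Free, since
// Hexagon's widening loads (e.g. memh/memuh) perform the extension to 32 bits
// as part of the load itself.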

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}