//===-- RISCVInterleavedAccess.cpp - RISC-V Interleaved Access Transform --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functions and callbacks related to the InterleavedAccessPass.
//
//===----------------------------------------------------------------------===//

13#include "RISCV.h"
14#include "RISCVISelLowering.h"
15#include "RISCVSubtarget.h"
19#include "llvm/IR/IRBuilder.h"
21#include "llvm/IR/IntrinsicsRISCV.h"
22#include "llvm/IR/Module.h"
24
25using namespace llvm;
26
bool RISCVTargetLowering::isLegalInterleavedAccessType(
    VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
    const DataLayout &DL) const {
  EVT VT = getValueType(DL, VTy);
  // Don't lower vlseg/vsseg for vector types that can't be split.
  if (!isTypeLegal(VT))
    return false;

  if (!isLegalElementTypeForRVV(VT.getScalarType()) ||
      !allowsMemoryAccessForAlignment(VTy->getContext(), DL, VT, AddrSpace,
                                      Alignment))
    return false;

  MVT ContainerVT = VT.getSimpleVT();

  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    if (!Subtarget.useRVVForFixedLengthVectors())
      return false;
    // Sometimes the interleaved access pass picks up splats as interleaves of
    // one element. Don't lower these.
    if (FVTy->getNumElements() < 2)
      return false;

    ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
  }

  // Need to make sure that EMUL * NFIELDS ≤ 8
  auto [LMUL, Fractional] = RISCVVType::decodeVLMUL(getLMUL(ContainerVT));
  if (Fractional)
    return true;
  return Factor * LMUL <= 8;
}
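// Worked example (illustrative, not in the original source): an i32 field
// type whose container maps to LMUL=2 (e.g. a scalable <vscale x 4 x i32>)
// passes the check above only for Factor <= 4, since 2 * NFIELDS must stay
// <= 8; fractional-LMUL containers accept any factor the callers allow
// (up to 8).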

static const Intrinsic::ID FixedVlsegIntrIds[] = {
    Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
    Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
    Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
    Intrinsic::riscv_seg8_load_mask};

static const Intrinsic::ID FixedVlssegIntrIds[] = {
    Intrinsic::riscv_sseg2_load_mask, Intrinsic::riscv_sseg3_load_mask,
    Intrinsic::riscv_sseg4_load_mask, Intrinsic::riscv_sseg5_load_mask,
    Intrinsic::riscv_sseg6_load_mask, Intrinsic::riscv_sseg7_load_mask,
    Intrinsic::riscv_sseg8_load_mask};

static const Intrinsic::ID ScalableVlsegIntrIds[] = {
    Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
    Intrinsic::riscv_vlseg4_mask, Intrinsic::riscv_vlseg5_mask,
    Intrinsic::riscv_vlseg6_mask, Intrinsic::riscv_vlseg7_mask,
    Intrinsic::riscv_vlseg8_mask};

static const Intrinsic::ID FixedVssegIntrIds[] = {
    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
    Intrinsic::riscv_seg8_store_mask};

static const Intrinsic::ID FixedVsssegIntrIds[] = {
    Intrinsic::riscv_sseg2_store_mask, Intrinsic::riscv_sseg3_store_mask,
    Intrinsic::riscv_sseg4_store_mask, Intrinsic::riscv_sseg5_store_mask,
    Intrinsic::riscv_sseg6_store_mask, Intrinsic::riscv_sseg7_store_mask,
    Intrinsic::riscv_sseg8_store_mask};

static const Intrinsic::ID ScalableVssegIntrIds[] = {
    Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
    Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
    Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
    Intrinsic::riscv_vsseg8_mask};

static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
  assert(N);
  if (N == 1)
    return true;

  using namespace PatternMatch;
  // Right now we're only recognizing the simplest pattern.
  uint64_t C;
  if (match(V, m_CombineOr(m_ConstantInt(C),
                           m_NUWMul(m_Value(), m_ConstantInt(C)))) &&
      C && C % N == 0)
    return true;

  if (isPowerOf2_32(N)) {
    KnownBits KB = computeKnownBits(V, DL);
    return KB.countMinTrailingZeros() >= Log2_32(N);
  }

  return false;
}
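// Illustrative (not from the original source): with N == 4, a constant EVL
// of 8 is accepted (8 % 4 == 0), as is "%evl = mul nuw i64 %n, 4" via the
// m_NUWMul pattern; an arbitrary %evl is only accepted when known-bits
// analysis proves at least Log2_32(4) = 2 trailing zero bits.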

/// Do the common operand retrieval and validation required by the
/// routines below.
static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy,
                           Instruction *I, Value *&Ptr, Value *&Mask,
                           Value *&VL, Align &Alignment) {

  IRBuilder<> Builder(I);
  const DataLayout &DL = I->getDataLayout();
  ElementCount EC = VTy->getElementCount();
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    assert(LI->isSimple());
    Ptr = LI->getPointerOperand();
    Alignment = LI->getAlign();
    assert(!Mask && "Unexpected mask on a load");
    Mask = Builder.getAllOnesMask(EC);
    VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
                                   : Constant::getAllOnesValue(XLenTy);
    return true;
  }
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    assert(SI->isSimple());
    Ptr = SI->getPointerOperand();
    Alignment = SI->getAlign();
    assert(!Mask && "Unexpected mask on a store");
    Mask = Builder.getAllOnesMask(EC);
    VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
                                   : Constant::getAllOnesValue(XLenTy);
    return true;
  }

  auto *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default:
    llvm_unreachable("Unsupported intrinsic type");
  case Intrinsic::vp_load:
  case Intrinsic::vp_store: {
    auto *VPLdSt = cast<VPIntrinsic>(I);
    Ptr = VPLdSt->getMemoryPointerParam();
    Alignment = VPLdSt->getPointerAlignment().value_or(
        DL.getABITypeAlign(VTy->getElementType()));

    assert(Mask && "vp.load and vp.store needs a mask!");

    Value *WideEVL = VPLdSt->getVectorLengthParam();
    // Conservatively check if EVL is a multiple of factor, otherwise some
    // (trailing) elements might be lost after the transformation.
    if (!isMultipleOfN(WideEVL, I->getDataLayout(), Factor))
      return false;

    auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
    VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
    return true;
  }
  case Intrinsic::masked_load: {
    Ptr = II->getOperand(0);
    Alignment = cast<ConstantInt>(II->getArgOperand(1))->getAlignValue();

    if (!isa<UndefValue>(II->getOperand(3)))
      return false;

    assert(Mask && "masked.load needs a mask!");

    VL = isa<FixedVectorType>(VTy)
             ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
             : Constant::getAllOnesValue(XLenTy);
    return true;
  }
  case Intrinsic::masked_store: {
    Ptr = II->getOperand(1);
    Alignment = cast<ConstantInt>(II->getArgOperand(2))->getAlignValue();

    assert(Mask && "masked.store needs a mask!");

    VL = isa<FixedVectorType>(VTy)
             ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
             : Constant::getAllOnesValue(XLenTy);
    return true;
  }
  }
}
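// Illustrative (not from the original source): for a vp.store whose EVL
// operand is, say, "%evl = mul nuw i32 %n, 2", a factor-2 lowering passes
// VL = %evl / 2 (zero-extended to XLEN) to the segment intrinsic, whereas a
// plain load/store of a fixed <4 x i32> field type gets an all-ones mask and
// VL = 4.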

/// Lower an interleaved load into a vlsegN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
/// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
/// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
/// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
/// Into:
/// %ld2 = { <4 x i32>, <4 x i32> } call llvm.riscv.seg2.load.v4i32.p0.i64(
///                                        %ptr, i64 4)
/// %vec0 = extractvalue { <4 x i32>, <4 x i32> } %ld2, i32 0
/// %vec1 = extractvalue { <4 x i32>, <4 x i32> } %ld2, i32 1
bool RISCVTargetLowering::lowerInterleavedLoad(
    Instruction *Load, Value *Mask, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor, const APInt &GapMask) const {
  assert(Indices.size() == Shuffles.size());
  assert(GapMask.getBitWidth() == Factor);

  // We only support cases where the skipped fields are the trailing ones.
  // TODO: Lower to strided load if there is only a single active field.
  unsigned MaskFactor = GapMask.popcount();
  if (MaskFactor < 2 || !GapMask.isMask())
    return false;

  IRBuilder<> Builder(Load);

  const DataLayout &DL = Load->getDataLayout();
  auto *VTy = cast<FixedVectorType>(Shuffles[0]->getType());
  auto *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(MaskFactor, VTy, XLenTy, Load, Ptr, Mask, VL, Alignment))
    return false;

  Type *PtrTy = Ptr->getType();
  unsigned AS = PtrTy->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(VTy, MaskFactor, Alignment, AS, DL))
    return false;

  CallInst *SegLoad = nullptr;
  if (MaskFactor < Factor) {
    // Lower to strided segmented load.
    unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
    Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
    SegLoad = Builder.CreateIntrinsic(FixedVlssegIntrIds[MaskFactor - 2],
                                      {VTy, PtrTy, XLenTy, XLenTy},
                                      {Ptr, Stride, Mask, VL});
  } else {
    // Lower to normal segmented load.
    SegLoad = Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
                                      {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
  }

  for (unsigned i = 0; i < Shuffles.size(); i++) {
    unsigned FactorIdx = Indices[i];
    if (FactorIdx >= MaskFactor) {
      // Replace masked-off factors (that are still extracted) with poison.
      Shuffles[i]->replaceAllUsesWith(PoisonValue::get(VTy));
    } else {
      Value *SubVec = Builder.CreateExtractValue(SegLoad, FactorIdx);
      Shuffles[i]->replaceAllUsesWith(SubVec);
    }
  }

  return true;
}
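// Illustrative (not from the original source): with Factor = 3 and
// GapMask = 0b011 (field 2 never extracted), MaskFactor is 2, so the access
// is lowered through FixedVlssegIntrIds[0] (a strided two-field segment load)
// with a byte stride of 3 * 4 = 12 for i32 elements, and the shuffle for the
// dead field is simply replaced with poison.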

/// Lower an interleaved store into a vssegN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
/// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
/// store <12 x i32> %i.vec, <12 x i32>* %ptr
///
/// Into:
/// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> %v1, <0, 1, 2, 3>
/// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> %v1, <4, 5, 6, 7>
/// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> %v1, <8, 9, 10, 11>
/// call void llvm.riscv.seg3.store.v4i32.p0.i64(%sub.v0, %sub.v1, %sub.v2,
///                                              %ptr, i32 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vsseg3 instruction in CodeGen.
bool RISCVTargetLowering::lowerInterleavedStore(Instruction *Store,
                                                Value *LaneMask,
                                                ShuffleVectorInst *SVI,
                                                unsigned Factor,
                                                const APInt &GapMask) const {
  assert(GapMask.getBitWidth() == Factor);

  // We only support cases where the skipped fields are the trailing ones.
  // TODO: Lower to strided store if there is only a single active field.
  unsigned MaskFactor = GapMask.popcount();
  if (MaskFactor < 2 || !GapMask.isMask())
    return false;

  IRBuilder<> Builder(Store);
  const DataLayout &DL = Store->getDataLayout();
  auto Mask = SVI->getShuffleMask();
  auto *ShuffleVTy = cast<FixedVectorType>(SVI->getType());
  // Given SVI : <n*factor x ty>, then VTy : <n x ty>
  auto *VTy = FixedVectorType::get(ShuffleVTy->getElementType(),
                                   ShuffleVTy->getNumElements() / Factor);
  auto *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(MaskFactor, VTy, XLenTy, Store, Ptr, LaneMask, VL,
                      Alignment))
    return false;

  Type *PtrTy = Ptr->getType();
  unsigned AS = PtrTy->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(VTy, MaskFactor, Alignment, AS, DL))
    return false;

  Function *SegStoreFunc;
  if (MaskFactor < Factor)
    // Strided segmented store.
    SegStoreFunc = Intrinsic::getOrInsertDeclaration(
        Store->getModule(), FixedVsssegIntrIds[MaskFactor - 2],
        {VTy, PtrTy, XLenTy, XLenTy});
  else
    // Normal segmented store.
    SegStoreFunc = Intrinsic::getOrInsertDeclaration(
        Store->getModule(), FixedVssegIntrIds[Factor - 2],
        {VTy, PtrTy, XLenTy});

  SmallVector<Value *, 10> Ops;
  SmallVector<int, 16> NewShuffleMask;

  for (unsigned i = 0; i < MaskFactor; i++) {
    // Collect shuffle mask for this lane.
    for (unsigned j = 0; j < VTy->getNumElements(); j++)
      NewShuffleMask.push_back(Mask[i + Factor * j]);

    Value *Shuffle = Builder.CreateShuffleVector(
        SVI->getOperand(0), SVI->getOperand(1), NewShuffleMask);
    Ops.push_back(Shuffle);

    NewShuffleMask.clear();
  }
  Ops.push_back(Ptr);
  if (MaskFactor < Factor) {
    // Insert the stride argument.
    unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
    Ops.push_back(ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes));
  }
  Ops.append({LaneMask, VL});
  Builder.CreateCall(SegStoreFunc, Ops);

  return true;
}
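// Illustrative (not from the original source): with Factor = 2 and a wide
// shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>, the loop above rebuilds the two
// per-field shuffles with masks <0, 1, 2, 3> and <4, 5, 6, 7>, i.e. the
// original %v0 and %v1, which then feed the segment-store intrinsic directly.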

bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
    Instruction *Load, Value *Mask, IntrinsicInst *DI) const {
  const unsigned Factor = getDeinterleaveIntrinsicFactor(DI->getIntrinsicID());
  if (Factor > 8)
    return false;

  IRBuilder<> Builder(Load);

  VectorType *ResVTy = getDeinterleavedVectorType(DI);

  const DataLayout &DL = Load->getDataLayout();
  auto *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(Factor, ResVTy, XLenTy, Load, Ptr, Mask, VL, Alignment))
    return false;

  Type *PtrTy = Ptr->getType();
  unsigned AS = PtrTy->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(ResVTy, Factor, Alignment, AS, DL))
    return false;

  Value *Return;
  if (isa<FixedVectorType>(ResVTy)) {
    Return = Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
                                     {ResVTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
  } else {
    unsigned SEW = DL.getTypeSizeInBits(ResVTy->getElementType());
    unsigned NumElts = ResVTy->getElementCount().getKnownMinValue();
    Type *VecTupTy = TargetExtType::get(
        Load->getContext(), "riscv.vector.tuple",
        ScalableVectorType::get(Builder.getInt8Ty(), NumElts * SEW / 8),
        Factor);
    Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
        Load->getModule(), ScalableVlsegIntrIds[Factor - 2],
        {VecTupTy, PtrTy, Mask->getType(), VL->getType()});

    Value *Operands[] = {
        PoisonValue::get(VecTupTy),
        Ptr,
        Mask,
        VL,
        ConstantInt::get(XLenTy,
                         RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC),
        ConstantInt::get(XLenTy, Log2_64(SEW))};

    CallInst *Vlseg = Builder.CreateCall(VlsegNFunc, Operands);

    SmallVector<Type *, 2> AggrTypes{Factor, ResVTy};
    Return = PoisonValue::get(StructType::get(Load->getContext(), AggrTypes));
    for (unsigned i = 0; i < Factor; ++i) {
      Value *VecExtract = Builder.CreateIntrinsic(
          Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy},
          {Vlseg, Builder.getInt32(i)});
      Return = Builder.CreateInsertValue(Return, VecExtract, i);
    }
  }

  DI->replaceAllUsesWith(Return);
  return true;
}
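// Illustrative (not from the original source): a factor-2 scalable
// deinterleave such as
//   %l = load <vscale x 8 x i32>, ptr %p
//   %d = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
//            @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %l)
// is rewritten into a masked vlseg2 returning a riscv.vector.tuple, with each
// field pulled out via @llvm.riscv.tuple.extract and re-packed into the
// struct return expected by the deinterleave's users.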

bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
    Instruction *Store, Value *Mask, ArrayRef<Value *> InterleaveValues) const {
  unsigned Factor = InterleaveValues.size();
  if (Factor > 8)
    return false;

  IRBuilder<> Builder(Store);

  auto *InVTy = cast<VectorType>(InterleaveValues[0]->getType());
  const DataLayout &DL = Store->getDataLayout();
  Type *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(Factor, InVTy, XLenTy, Store, Ptr, Mask, VL, Alignment))
    return false;
  Type *PtrTy = Ptr->getType();
  unsigned AS = Ptr->getType()->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(InVTy, Factor, Alignment, AS, DL))
    return false;

  if (isa<FixedVectorType>(InVTy)) {
    Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
        Store->getModule(), FixedVssegIntrIds[Factor - 2],
        {InVTy, PtrTy, XLenTy});
    SmallVector<Value *, 10> Ops(InterleaveValues);
    Ops.append({Ptr, Mask, VL});
    Builder.CreateCall(VssegNFunc, Ops);
    return true;
  }
  unsigned SEW = DL.getTypeSizeInBits(InVTy->getElementType());
  unsigned NumElts = InVTy->getElementCount().getKnownMinValue();
  Type *VecTupTy = TargetExtType::get(
      Store->getContext(), "riscv.vector.tuple",
      ScalableVectorType::get(Builder.getInt8Ty(), NumElts * SEW / 8), Factor);

  Value *StoredVal = PoisonValue::get(VecTupTy);
  for (unsigned i = 0; i < Factor; ++i)
    StoredVal = Builder.CreateIntrinsic(
        Intrinsic::riscv_tuple_insert, {VecTupTy, InVTy},
        {StoredVal, InterleaveValues[i], Builder.getInt32(i)});

  Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
      Store->getModule(), ScalableVssegIntrIds[Factor - 2],
      {VecTupTy, PtrTy, Mask->getType(), VL->getType()});

  Value *Operands[] = {StoredVal, Ptr, Mask, VL,
                       ConstantInt::get(XLenTy, Log2_64(SEW))};
  Builder.CreateCall(VssegNFunc, Operands);
  return true;
}
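// Illustrative (not from the original source): a fixed-length
//   %il = call <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32> %a,
//                                                       <4 x i32> %b)
//   store <8 x i32> %il, ptr %p
// becomes a single call to the riscv.seg2.store.mask intrinsic with %a and %b
// passed as separate fields, an all-ones mask, and VL equal to 4.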