//===- InterleavedAccessPass.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data by a factor. An interleaved store writes several
// vectors to memory, RE-interleaving the data by a factor.
//
// As interleaved accesses are difficult to identify in CodeGen (mainly
// because the VECTOR_SHUFFLE DAG node is quite different from the shufflevector
// IR), we identify and transform them to intrinsics in this pass so the
// intrinsics can be easily matched into target specific instructions later in
// CodeGen.
//
// E.g. An interleaved load (Factor = 2):
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <0, 2, 4, 6>
//        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <1, 3, 5, 7>
//
// It could be transformed into a ld2 intrinsic in AArch64 backend or a vld2
// intrinsic in ARM backend.
//
// In X86, this can be further optimized into a set of target
// specific loads followed by an optimized sequence of shuffles.
//
// E.g. An interleaved store (Factor = 3):
//        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
//                         <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
//        store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into a st3 intrinsic in AArch64 backend or a vst3
// intrinsic in ARM backend.
//
// Similarly, a set of interleaved stores can be transformed into an optimized
// sequence of shuffles followed by a set of target specific stores for X86.
//
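// Besides shufflevector-based patterns, this pass also matches loads and
// stores paired with the llvm.vector.(de)interleaveN family of intrinsics,
// including masked.load/masked.store and vp.load/vp.store variants. As an
// illustrative sketch (not part of the original header), for Factor = 2:
//        %wide.vec = load <8 x i32>, ptr %ptr
//        %de = call { <4 x i32>, <4 x i32> }
//                   @llvm.vector.deinterleave2.v8i32(<8 x i32> %wide.vec)
// may be lowered to a single ld2 on AArch64, and conversely
//        %i.vec = call <8 x i32>
//                   @llvm.vector.interleave2.v8i32(<4 x i32> %a, <4 x i32> %b)
//        store <8 x i32> %i.vec, ptr %ptr
// may become st2.
//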
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/InterleavedAccess.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "interleaved-access"

static cl::opt<bool> LowerInterleavedAccesses(
    "lower-interleaved-accesses",
    cl::desc("Enable lowering interleaved accesses to intrinsics"),
    cl::init(true), cl::Hidden);

namespace {

class InterleavedAccessImpl {
  friend class InterleavedAccess;

public:
  InterleavedAccessImpl() = default;
  InterleavedAccessImpl(DominatorTree *DT, const TargetLowering *TLI)
      : DT(DT), TLI(TLI), MaxFactor(TLI->getMaxSupportedInterleaveFactor()) {}
  bool runOnFunction(Function &F);

private:
  DominatorTree *DT = nullptr;
  const TargetLowering *TLI = nullptr;

  /// The maximum supported interleave factor.
  unsigned MaxFactor = 0u;

  /// Transform an interleaved load into target specific intrinsics.
  bool lowerInterleavedLoad(Instruction *Load,
                            SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Transform an interleaved store into target specific intrinsics.
  bool lowerInterleavedStore(Instruction *Store,
                             SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Transform a load and a deinterleave intrinsic into target specific
  /// instructions.
  bool lowerDeinterleaveIntrinsic(IntrinsicInst *II,
                                  SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Transform an interleave intrinsic and a store into target specific
  /// instructions.
  bool lowerInterleaveIntrinsic(IntrinsicInst *II,
                                SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Returns true if the uses of an interleaved load by the
  /// extractelement instructions in \p Extracts can be replaced by uses of the
  /// shufflevector instructions in \p Shuffles instead. If so, the necessary
  /// replacements are also performed.
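  ///
  /// An illustrative sketch (hypothetical values, not from the original
  /// comments): with %v0 = shuffle %wide.vec, poison, <0, 2, 4, 6>,
  ///   %e = extractelement <8 x i32> %wide.vec, i32 2
  /// can be rewritten as
  ///   %e = extractelement <4 x i32> %v0, i32 1
  /// because mask element 1 of %v0 selects wide-vector element 2.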
  bool tryReplaceExtracts(ArrayRef<ExtractElementInst *> Extracts,
                          ArrayRef<ShuffleVectorInst *> Shuffles);

  /// Given a number of shuffles of the form shuffle(binop(x,y)), convert them
  /// to binop(shuffle(x), shuffle(y)) to allow the formation of an
  /// interleaving load. Any newly created shuffles that operate on \p LI will
  /// be added to \p Shuffles. Returns true if any changes to the IR have been
  /// made.
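  ///
  /// An illustrative sketch (hypothetical names, not from the original
  /// comments): given a load %L and another value %M,
  ///   %b = add <8 x i32> %L, %M
  ///   %s = shufflevector <8 x i32> %b, <8 x i32> poison, <0, 2, 4, 6>
  /// is rewritten to
  ///   %s0 = shufflevector <8 x i32> %L, <8 x i32> poison, <0, 2, 4, 6>
  ///   %s1 = shufflevector <8 x i32> %M, <8 x i32> poison, <0, 2, 4, 6>
  ///   %b2 = add <4 x i32> %s0, %s1
  /// so that %s0 feeds the interleaved-load formation directly.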
  bool replaceBinOpShuffles(ArrayRef<ShuffleVectorInst *> BinOpShuffles,
                            SmallVectorImpl<ShuffleVectorInst *> &Shuffles,
                            Instruction *LI);
};

class InterleavedAccess : public FunctionPass {
  InterleavedAccessImpl Impl;

public:
  static char ID;

  InterleavedAccess() : FunctionPass(ID) {
    initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "Interleaved Access Pass"; }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace.

PreservedAnalyses InterleavedAccessPass::run(Function &F,
                                             FunctionAnalysisManager &FAM) {
  auto *DT = &FAM.getResult<DominatorTreeAnalysis>(F);
  auto *TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  InterleavedAccessImpl Impl(DT, TLI);
  bool Changed = Impl.runOnFunction(F);

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

char InterleavedAccess::ID = 0;

bool InterleavedAccess::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC || !LowerInterleavedAccesses)
    return false;

  LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");

  Impl.DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &TM = TPC->getTM<TargetMachine>();
  Impl.TLI = TM.getSubtargetImpl(F)->getTargetLowering();
  Impl.MaxFactor = Impl.TLI->getMaxSupportedInterleaveFactor();

  return Impl.runOnFunction(F);
}

INITIALIZE_PASS_BEGIN(InterleavedAccess, DEBUG_TYPE,
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(InterleavedAccess, DEBUG_TYPE,
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)

FunctionPass *llvm::createInterleavedAccessPass() {
  return new InterleavedAccess();
}
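
// For context (an assumption based on LLVM's test conventions, not part of
// this file): the pass is typically exercised with the new pass manager as
//   opt -mtriple=aarch64-linux-gnu -passes=interleaved-access -S < input.ll
// where the target's TargetLowering decides which factors can be matched.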

/// Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
///     <0, 2, 4, 6> (mask of index 0 to extract even elements)
///     <1, 3, 5, 7> (mask of index 1 to extract odd elements)
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned &Index, unsigned MaxFactor,
                               unsigned NumLoadElements) {
  if (Mask.size() < 2)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    // Make sure we don't produce a load wider than the input load.
    if (Mask.size() * Factor > NumLoadElements)
      return false;
    if (ShuffleVectorInst::isDeInterleaveMaskOfFactor(Mask, Factor, Index))
      return true;
  }

  return false;
}

/// Check if the mask can be used in an interleaved store.
///
/// It checks for a more general pattern than the RE-interleave mask.
/// I.e. <x, y, ... z, x+1, y+1, ...z+1, x+2, y+2, ...z+2, ...>
/// E.g. For a Factor of 2 (LaneLen=4): <4, 32, 5, 33, 6, 34, 7, 35>
/// E.g. For a Factor of 3 (LaneLen=4): <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
/// E.g. For a Factor of 4 (LaneLen=2): <8, 2, 12, 4, 9, 3, 13, 5>
///
/// The particular case of an RE-interleave mask is:
/// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
/// E.g. For a Factor of 2 (LaneLen=4): <0, 4, 1, 5, 2, 6, 3, 7>
static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
                               unsigned MaxFactor) {
  unsigned NumElts = SVI->getShuffleMask().size();
  if (NumElts < 4)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    if (SVI->isInterleave(Factor))
      return true;
  }

  return false;
}

static Value *getMaskOperand(IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    llvm_unreachable("Unexpected intrinsic");
  case Intrinsic::vp_load:
    return II->getOperand(1);
  case Intrinsic::masked_load:
    return II->getOperand(2);
  case Intrinsic::vp_store:
    return II->getOperand(2);
  case Intrinsic::masked_store:
    return II->getOperand(3);
  }
}

// Return a pair of
// (1) The corresponding deinterleaved (leaf) mask, or nullptr if there is no
//     valid mask.
// (2) A gap mask: some wide masks effectively skip certain fields, and this
//     element is a mask in which inactive lanes represent fields that are
//     skipped (i.e. "gaps").
static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
                                         ElementCount LeafValueEC);

static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
                                         VectorType *LeafValueTy) {
  return getMask(WideMask, Factor, LeafValueTy->getElementCount());
}
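
// An illustrative example (not from the original source): with Factor = 2 and
// a fixed-length leaf of 4 elements, the wide constant mask
//   <i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0>
// has every lane of field 1 inactive, so getMask returns the leaf mask
// <i1 1, i1 1, i1 1, i1 1> together with a gap mask of 0b01 (bit 1 cleared).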

bool InterleavedAccessImpl::lowerInterleavedLoad(
    Instruction *Load, SmallSetVector<Instruction *, 32> &DeadInsts) {
  if (isa<ScalableVectorType>(Load->getType()))
    return false;

  auto *LI = dyn_cast<LoadInst>(Load);
  auto *II = dyn_cast<IntrinsicInst>(Load);
  if (!LI && !II)
    return false;

  if (LI && !LI->isSimple())
    return false;

  // Check if all users of this load are shufflevectors. If we encounter any
  // users that are extractelement instructions or binary operators, we save
  // them to later check if they can be modified to extract from one of the
  // shufflevectors instead of the load.

  SmallVector<ShuffleVectorInst *, 4> Shuffles;
  SmallVector<ExtractElementInst *, 4> Extracts;
  // BinOpShuffles need to be handled a single time in case both operands of the
  // binop are the same load.
  SmallSetVector<ShuffleVectorInst *, 4> BinOpShuffles;

  for (auto *User : Load->users()) {
    auto *Extract = dyn_cast<ExtractElementInst>(User);
    if (Extract && isa<ConstantInt>(Extract->getIndexOperand())) {
      Extracts.push_back(Extract);
      continue;
    }
    if (auto *BI = dyn_cast<BinaryOperator>(User)) {
      if (!BI->user_empty() && all_of(BI->users(), [](auto *U) {
            auto *SVI = dyn_cast<ShuffleVectorInst>(U);
            return SVI && isa<UndefValue>(SVI->getOperand(1));
          })) {
        for (auto *SVI : BI->users())
          BinOpShuffles.insert(cast<ShuffleVectorInst>(SVI));
        continue;
      }
    }
    auto *SVI = dyn_cast<ShuffleVectorInst>(User);
    if (!SVI || !isa<UndefValue>(SVI->getOperand(1)))
      return false;

    Shuffles.push_back(SVI);
  }

  if (Shuffles.empty() && BinOpShuffles.empty())
    return false;

  unsigned Factor, Index;

  unsigned NumLoadElements =
      cast<FixedVectorType>(Load->getType())->getNumElements();
  auto *FirstSVI = Shuffles.size() > 0 ? Shuffles[0] : BinOpShuffles[0];
  // Check if the first shufflevector is a DE-interleave shuffle.
  if (!isDeInterleaveMask(FirstSVI->getShuffleMask(), Factor, Index, MaxFactor,
                          NumLoadElements))
    return false;

  // Holds the corresponding index for each DE-interleave shuffle.
  SmallVector<unsigned, 4> Indices;

  VectorType *VecTy = cast<VectorType>(FirstSVI->getType());

  // Check if other shufflevectors are also DE-interleaved of the same type
  // and factor as the first shufflevector.
  for (auto *Shuffle : Shuffles) {
    if (Shuffle->getType() != VecTy)
      return false;
    if (!ShuffleVectorInst::isDeInterleaveMaskOfFactor(
            Shuffle->getShuffleMask(), Factor, Index))
      return false;

    assert(Shuffle->getShuffleMask().size() <= NumLoadElements);
    Indices.push_back(Index);
  }
  for (auto *Shuffle : BinOpShuffles) {
    if (Shuffle->getType() != VecTy)
      return false;
    if (!ShuffleVectorInst::isDeInterleaveMaskOfFactor(
            Shuffle->getShuffleMask(), Factor, Index))
      return false;

    assert(Shuffle->getShuffleMask().size() <= NumLoadElements);

    if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(0) == Load)
      Indices.push_back(Index);
    if (cast<Instruction>(Shuffle->getOperand(0))->getOperand(1) == Load)
      Indices.push_back(Index);
  }

  // Try and modify users of the load that are extractelement instructions to
  // use the shufflevector instructions instead of the load.
  if (!tryReplaceExtracts(Extracts, Shuffles))
    return false;

  bool BinOpShuffleChanged =
      replaceBinOpShuffles(BinOpShuffles.getArrayRef(), Shuffles, Load);

  Value *Mask = nullptr;
  auto GapMask = APInt::getAllOnes(Factor);
  if (LI) {
    LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *Load << "\n");
  } else {
    // Check mask operand. Handle both all-true/false and interleaved mask.
    std::tie(Mask, GapMask) = getMask(getMaskOperand(II), Factor, VecTy);
    if (!Mask)
      return false;

    LLVM_DEBUG(dbgs() << "IA: Found an interleaved vp.load or masked.load: "
                      << *Load << "\n");
    LLVM_DEBUG(dbgs() << "IA: With nominal factor " << Factor
                      << " and actual factor " << GapMask.popcount() << "\n");
  }

  // Try to create target specific intrinsics to replace the load and
  // shuffles.
  if (!TLI->lowerInterleavedLoad(cast<Instruction>(Load), Mask, Shuffles,
                                 Indices, Factor, GapMask))
    // If Extracts is not empty, tryReplaceExtracts made changes earlier.
    return !Extracts.empty() || BinOpShuffleChanged;

  DeadInsts.insert_range(Shuffles);

  DeadInsts.insert(Load);
  return true;
}

bool InterleavedAccessImpl::replaceBinOpShuffles(
    ArrayRef<ShuffleVectorInst *> BinOpShuffles,
    SmallVectorImpl<ShuffleVectorInst *> &Shuffles, Instruction *Load) {
  for (auto *SVI : BinOpShuffles) {
    BinaryOperator *BI = cast<BinaryOperator>(SVI->getOperand(0));
    Type *BIOp0Ty = BI->getOperand(0)->getType();
    ArrayRef<int> Mask = SVI->getShuffleMask();
    assert(all_of(Mask, [&](int Idx) {
      return Idx < (int)cast<FixedVectorType>(BIOp0Ty)->getNumElements();
    }));

    BasicBlock::iterator insertPos = SVI->getIterator();
    auto *NewSVI1 =
        new ShuffleVectorInst(BI->getOperand(0), PoisonValue::get(BIOp0Ty),
                              Mask, SVI->getName(), insertPos);
    auto *NewSVI2 = new ShuffleVectorInst(
        BI->getOperand(1), PoisonValue::get(BI->getOperand(1)->getType()), Mask,
        SVI->getName(), insertPos);
    BinaryOperator *NewBI = BinaryOperator::CreateWithCopiedFlags(
        BI->getOpcode(), NewSVI1, NewSVI2, BI, BI->getName(), insertPos);
    SVI->replaceAllUsesWith(NewBI);
    LLVM_DEBUG(dbgs() << "  Replaced: " << *BI << "\n    And   : " << *SVI
                      << "\n  With    : " << *NewSVI1 << "\n    And   : "
                      << *NewSVI2 << "\n    And   : " << *NewBI << "\n");
    RecursivelyDeleteTriviallyDeadInstructions(SVI);
    if (NewSVI1->getOperand(0) == Load)
      Shuffles.push_back(NewSVI1);
    if (NewSVI2->getOperand(0) == Load)
      Shuffles.push_back(NewSVI2);
  }

  return !BinOpShuffles.empty();
}

bool InterleavedAccessImpl::tryReplaceExtracts(
    ArrayRef<ExtractElementInst *> Extracts,
    ArrayRef<ShuffleVectorInst *> Shuffles) {
  // If there aren't any extractelement instructions to modify, there's nothing
  // to do.
  if (Extracts.empty())
    return true;

  // Maps extractelement instructions to vector-index pairs. The extractelement
  // instructions will be modified to use the new vector and index operands.
  DenseMap<ExtractElementInst *, std::pair<Value *, int>> ReplacementMap;

  for (auto *Extract : Extracts) {
    // The vector index that is extracted.
    auto *IndexOperand = cast<ConstantInt>(Extract->getIndexOperand());
    auto Index = IndexOperand->getSExtValue();

    // Look for a suitable shufflevector instruction. The goal is to modify the
    // extractelement instruction (which uses an interleaved load) to use one
    // of the shufflevector instructions instead of the load.
    for (auto *Shuffle : Shuffles) {
      // If the shufflevector instruction doesn't dominate the extract, we
      // can't create a use of it.
      if (!DT->dominates(Shuffle, Extract))
        continue;

      // Inspect the indices of the shufflevector instruction. If the shuffle
      // selects the same index that is extracted, we can modify the
      // extractelement instruction.
      SmallVector<int, 4> Indices;
      Shuffle->getShuffleMask(Indices);
      for (unsigned I = 0; I < Indices.size(); ++I)
        if (Indices[I] == Index) {
          assert(Extract->getOperand(0) == Shuffle->getOperand(0) &&
                 "Vector operations do not match");
          ReplacementMap[Extract] = std::make_pair(Shuffle, I);
          break;
        }

      // If we found a suitable shufflevector instruction, stop looking.
      if (ReplacementMap.count(Extract))
        break;
    }

    // If we did not find a suitable shufflevector instruction, the
    // extractelement instruction cannot be modified, so we must give up.
    if (!ReplacementMap.count(Extract))
      return false;
  }

  // Finally, perform the replacements.
  IRBuilder<> Builder(Extracts[0]->getContext());
  for (auto &Replacement : ReplacementMap) {
    auto *Extract = Replacement.first;
    auto *Vector = Replacement.second.first;
    auto Index = Replacement.second.second;
    Builder.SetInsertPoint(Extract);
    Extract->replaceAllUsesWith(Builder.CreateExtractElement(Vector, Index));
    Extract->eraseFromParent();
  }

  return true;
}

bool InterleavedAccessImpl::lowerInterleavedStore(
    Instruction *Store, SmallSetVector<Instruction *, 32> &DeadInsts) {
  Value *StoredValue;
  auto *SI = dyn_cast<StoreInst>(Store);
  auto *II = dyn_cast<IntrinsicInst>(Store);
  if (SI) {
    if (!SI->isSimple())
      return false;
    StoredValue = SI->getValueOperand();
  } else {
    assert(II->getIntrinsicID() == Intrinsic::vp_store ||
           II->getIntrinsicID() == Intrinsic::masked_store);
    StoredValue = II->getArgOperand(0);
  }

  auto *SVI = dyn_cast<ShuffleVectorInst>(StoredValue);
  if (!SVI || !SVI->hasOneUse() || isa<ScalableVectorType>(SVI->getType()))
    return false;

  unsigned NumStoredElements =
      cast<FixedVectorType>(SVI->getType())->getNumElements();
  // Check if the shufflevector is a RE-interleave shuffle.
  unsigned Factor;
  if (!isReInterleaveMask(SVI, Factor, MaxFactor))
    return false;
  assert(NumStoredElements % Factor == 0 &&
         "number of stored elements should be a multiple of Factor");

  Value *Mask = nullptr;
  auto GapMask = APInt::getAllOnes(Factor);
  if (SI) {
    LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *Store << "\n");
  } else {
    // Check mask operand. Handle both all-true/false and interleaved mask.
    unsigned LaneMaskLen = NumStoredElements / Factor;
    std::tie(Mask, GapMask) = getMask(getMaskOperand(II), Factor,
                                      ElementCount::getFixed(LaneMaskLen));
    if (!Mask)
      return false;

    LLVM_DEBUG(dbgs() << "IA: Found an interleaved vp.store or masked.store: "
                      << *Store << "\n");
    LLVM_DEBUG(dbgs() << "IA: With nominal factor " << Factor
                      << " and actual factor " << GapMask.popcount() << "\n");
  }

  // Try to create target specific intrinsics to replace the store and
  // shuffle.
  if (!TLI->lowerInterleavedStore(Store, Mask, SVI, Factor, GapMask))
    return false;

  // Already have a new target specific interleaved store. Erase the old store.
  DeadInsts.insert(Store);
  DeadInsts.insert(SVI);
  return true;
}

// A wide mask <1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0> could be used to skip the
// last field in a factor-of-three interleaved store or deinterleaved load (in
// which case LeafMaskLen is 4). Such a (wide) mask is also known as a gap
// mask. This helper function tries to detect this pattern by clearing the
// GapMask bit of every field whose lanes are all inactive; the popcount of
// GapMask is then the actual factor we're accessing, which is 2 in this
// example.
static void getGapMask(const Constant &MaskConst, unsigned Factor,
                       unsigned LeafMaskLen, APInt &GapMask) {
  assert(GapMask.getBitWidth() == Factor);
  for (unsigned F = 0U; F < Factor; ++F) {
    bool AllZero = true;
    for (unsigned Idx = 0U; Idx < LeafMaskLen; ++Idx) {
      Constant *C = MaskConst.getAggregateElement(F + Idx * Factor);
      if (!C->isZeroValue()) {
        AllZero = false;
        break;
      }
    }
    // All mask bits for this field are zero; skip it.
    if (AllZero)
      GapMask.clearBit(F);
  }
}

static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
                                         ElementCount LeafValueEC) {
  auto GapMask = APInt::getAllOnes(Factor);

  if (auto *IMI = dyn_cast<IntrinsicInst>(WideMask)) {
    if (unsigned F = getInterleaveIntrinsicFactor(IMI->getIntrinsicID());
        F && F == Factor) {
      Value *RefArg = nullptr;
      // Check if all the intrinsic arguments are the same, except those that
      // are zeros, which we mark as gaps in the gap mask.
      for (auto [Idx, Arg] : enumerate(IMI->args())) {
        if (auto *C = dyn_cast<Constant>(Arg); C && C->isZeroValue()) {
          GapMask.clearBit(Idx);
          continue;
        }

        if (!RefArg)
          RefArg = Arg;
        else if (RefArg != Arg)
          return {nullptr, GapMask};
      }

      // On very rare occasions, all the intrinsic arguments might be zeros,
      // in which case we still want to return an all-zeros constant instead
      // of nullptr.
      return {RefArg ? RefArg : IMI->getArgOperand(0), GapMask};
    }
  }

  // Masks that are assembled from bitwise AND.
  if (auto *AndOp = dyn_cast<BinaryOperator>(WideMask);
      AndOp && AndOp->getOpcode() == Instruction::And) {
    auto [MaskLHS, GapMaskLHS] =
        getMask(AndOp->getOperand(0), Factor, LeafValueEC);
    auto [MaskRHS, GapMaskRHS] =
        getMask(AndOp->getOperand(1), Factor, LeafValueEC);
    if (!MaskLHS || !MaskRHS)
      return {nullptr, GapMask};
    // Using IRBuilder here so that any trivial constants could be folded right
    // away.
    return {IRBuilder<>(AndOp).CreateAnd(MaskLHS, MaskRHS),
            GapMaskLHS & GapMaskRHS};
  }

  if (auto *ConstMask = dyn_cast<Constant>(WideMask)) {
    if (auto *Splat = ConstMask->getSplatValue())
      // All-ones or all-zeros mask.
      return {ConstantVector::getSplat(LeafValueEC, Splat), GapMask};

    if (LeafValueEC.isFixed()) {
      unsigned LeafMaskLen = LeafValueEC.getFixedValue();
      // First, check if we use a gap mask to skip some of the factors / fields.
      getGapMask(*ConstMask, Factor, LeafMaskLen, GapMask);

      SmallVector<Constant *, 8> LeafMask(LeafMaskLen, nullptr);
      // If this is a fixed-length constant mask, each lane / leaf has to
      // use the same mask. This is done by checking if every group with Factor
      // number of elements in the interleaved mask has homogeneous values.
      for (unsigned Idx = 0U; Idx < LeafMaskLen * Factor; ++Idx) {
        if (!GapMask[Idx % Factor])
          continue;
        Constant *C = ConstMask->getAggregateElement(Idx);
        if (LeafMask[Idx / Factor] && LeafMask[Idx / Factor] != C)
          return {nullptr, GapMask};
        LeafMask[Idx / Factor] = C;
      }

      return {ConstantVector::get(LeafMask), GapMask};
    }
  }

  if (auto *SVI = dyn_cast<ShuffleVectorInst>(WideMask)) {
    // Check that the shuffle mask is: a) an interleave, b) all of the same
    // set of the elements, and c) contained by the first source. (c) could
    // be relaxed if desired.
    unsigned NumSrcElts =
        cast<FixedVectorType>(SVI->getOperand(1)->getType())->getNumElements();
    SmallVector<unsigned> StartIndexes;
    if (ShuffleVectorInst::isInterleaveMask(SVI->getShuffleMask(), Factor,
                                            NumSrcElts * 2, StartIndexes) &&
        llvm::all_of(StartIndexes, [](unsigned Start) { return Start == 0; }) &&
        llvm::all_of(SVI->getShuffleMask(), [&NumSrcElts](int Idx) {
          return Idx < (int)NumSrcElts;
        })) {
      auto *LeafMaskTy =
          VectorType::get(Type::getInt1Ty(SVI->getContext()), LeafValueEC);
      IRBuilder<> Builder(SVI);
      return {Builder.CreateExtractVector(LeafMaskTy, SVI->getOperand(0),
                                          uint64_t(0)),
              GapMask};
    }
  }

  return {nullptr, GapMask};
}
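
// An illustrative example of the shufflevector case above (hypothetical
// values, not from the original source): with Factor = 2 and a 4-element
// leaf, a wide mask built as
//   %wide.mask = shufflevector <4 x i1> %m, <4 x i1> poison,
//                              <8 x i32> <i32 0, i32 0, i32 1, i32 1,
//                                         i32 2, i32 2, i32 3, i32 3>
// replicates each lane of %m across both fields (all StartIndexes are 0 and
// every index lies in the first source), so getMask recovers %m itself as
// the leaf mask via llvm.vector.extract.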

bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
    IntrinsicInst *DI, SmallSetVector<Instruction *, 32> &DeadInsts) {
  Instruction *LoadedVal = dyn_cast<Instruction>(DI->getOperand(0));
  if (!LoadedVal || !LoadedVal->hasOneUse())
    return false;

  auto *LI = dyn_cast<LoadInst>(LoadedVal);
  auto *II = dyn_cast<IntrinsicInst>(LoadedVal);
  if (!LI && !II)
    return false;

  const unsigned Factor = getDeinterleaveIntrinsicFactor(DI->getIntrinsicID());
  assert(Factor && "unexpected deinterleave intrinsic");

  Value *Mask = nullptr;
  if (LI) {
    if (!LI->isSimple())
      return false;

    LLVM_DEBUG(dbgs() << "IA: Found a load with deinterleave intrinsic " << *DI
                      << " and factor = " << Factor << "\n");
  } else {
    assert(II);
    if (II->getIntrinsicID() != Intrinsic::masked_load &&
        II->getIntrinsicID() != Intrinsic::vp_load)
      return false;

    // Check mask operand. Handle both all-true/false and interleaved mask.
    APInt GapMask(Factor, 0);
    std::tie(Mask, GapMask) =
        getMask(getMaskOperand(II), Factor, getDeinterleavedVectorType(DI));
    if (!Mask)
      return false;
    // Gap masks are not yet supported when deinterleaving with intrinsics,
    // but it is possible that we already changed the IR, hence we still
    // return true here.
    if (GapMask.popcount() != Factor)
      return true;

    LLVM_DEBUG(dbgs() << "IA: Found a vp.load or masked.load with deinterleave"
                      << " intrinsic " << *DI << " and factor = "
                      << Factor << "\n");
  }

  // Try and match this with target specific intrinsics.
  if (!TLI->lowerDeinterleaveIntrinsicToLoad(LoadedVal, Mask, DI))
    return false;

  DeadInsts.insert(DI);
  // We now have a target-specific load, so delete the old one.
  DeadInsts.insert(LoadedVal);
  return true;
}

bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
    IntrinsicInst *IntII, SmallSetVector<Instruction *, 32> &DeadInsts) {
  if (!IntII->hasOneUse())
    return false;
  Instruction *StoredBy = dyn_cast<Instruction>(IntII->user_back());
  if (!StoredBy)
    return false;
  auto *SI = dyn_cast<StoreInst>(StoredBy);
  auto *II = dyn_cast<IntrinsicInst>(StoredBy);
  if (!SI && !II)
    return false;

  SmallVector<Value *, 8> InterleaveValues(IntII->args());
  const unsigned Factor = getInterleaveIntrinsicFactor(IntII->getIntrinsicID());
  assert(Factor && "unexpected interleave intrinsic");

  Value *Mask = nullptr;
  if (II) {
    if (II->getIntrinsicID() != Intrinsic::masked_store &&
        II->getIntrinsicID() != Intrinsic::vp_store)
      return false;
    // Check mask operand. Handle both all-true/false and interleaved mask.
    APInt GapMask(Factor, 0);
    std::tie(Mask, GapMask) =
        getMask(getMaskOperand(II), Factor,
                cast<VectorType>(InterleaveValues[0]->getType()));
    if (!Mask)
      return false;
    // Gap masks are not yet supported when interleaving with intrinsics,
    // but it is possible that we already changed the IR, hence we still
    // return true here.
    if (GapMask.popcount() != Factor)
      return true;

    LLVM_DEBUG(dbgs() << "IA: Found a vp.store or masked.store with interleave"
                      << " intrinsic " << *IntII << " and factor = "
                      << Factor << "\n");
  } else {
    if (!SI->isSimple())
      return false;

    LLVM_DEBUG(dbgs() << "IA: Found a store with interleave intrinsic "
                      << *IntII << " and factor = " << Factor << "\n");
  }

  // Try and match this with target specific intrinsics.
  if (!TLI->lowerInterleaveIntrinsicToStore(StoredBy, Mask, InterleaveValues))
    return false;

  // We now have a target-specific store, so delete the old one.
  DeadInsts.insert(StoredBy);
  DeadInsts.insert(IntII);
  return true;
}

bool InterleavedAccessImpl::runOnFunction(Function &F) {
  // Holds dead instructions that will be erased later.
  SmallSetVector<Instruction *, 32> DeadInsts;
  bool Changed = false;

  using namespace PatternMatch;
  for (auto &I : instructions(F)) {
    if (match(&I, m_CombineOr(m_Load(m_Value()),
                              m_Intrinsic<Intrinsic::vp_load>())) ||
        match(&I, m_Intrinsic<Intrinsic::masked_load>()))
      Changed |= lowerInterleavedLoad(&I, DeadInsts);

    if (match(&I, m_CombineOr(m_Store(m_Value(), m_Value()),
                              m_Intrinsic<Intrinsic::vp_store>())) ||
        match(&I, m_Intrinsic<Intrinsic::masked_store>()))
      Changed |= lowerInterleavedStore(&I, DeadInsts);

    if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
      if (getDeinterleaveIntrinsicFactor(II->getIntrinsicID()))
        Changed |= lowerDeinterleaveIntrinsic(II, DeadInsts);
      else if (getInterleaveIntrinsicFactor(II->getIntrinsicID()))
        Changed |= lowerInterleaveIntrinsic(II, DeadInsts);
    }
  }

  for (auto *I : DeadInsts)
    I->eraseFromParent();

  return Changed;
}